path: root/contrib/pg_stat_statements
Diffstat (limited to 'contrib/pg_stat_statements')
-rw-r--r--  contrib/pg_stat_statements/.gitignore                              4
-rw-r--r--  contrib/pg_stat_statements/Makefile                               33
-rw-r--r--  contrib/pg_stat_statements/expected/pg_stat_statements.out       806
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql       42
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql       43
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql       47
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.3--1.4.sql        7
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.4--1.5.sql        6
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.4.sql            48
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql        7
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql       22
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements--1.7--1.8.sql       56
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c                 3447
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.conf                 1
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.control              5
-rw-r--r--  contrib/pg_stat_statements/sql/pg_stat_statements.sql            338
16 files changed, 4912 insertions, 0 deletions
diff --git a/contrib/pg_stat_statements/.gitignore b/contrib/pg_stat_statements/.gitignore
new file mode 100644
index 0000000..5dcb3ff
--- /dev/null
+++ b/contrib/pg_stat_statements/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile
new file mode 100644
index 0000000..081f997
--- /dev/null
+++ b/contrib/pg_stat_statements/Makefile
@@ -0,0 +1,33 @@
+# contrib/pg_stat_statements/Makefile
+
+MODULE_big = pg_stat_statements
+OBJS = \
+ $(WIN32RES) \
+ pg_stat_statements.o
+
+EXTENSION = pg_stat_statements
+DATA = pg_stat_statements--1.4.sql \
+ pg_stat_statements--1.7--1.8.sql pg_stat_statements--1.6--1.7.sql \
+ pg_stat_statements--1.5--1.6.sql pg_stat_statements--1.4--1.5.sql \
+ pg_stat_statements--1.3--1.4.sql pg_stat_statements--1.2--1.3.sql \
+ pg_stat_statements--1.1--1.2.sql pg_stat_statements--1.0--1.1.sql
+PGFILEDESC = "pg_stat_statements - execution statistics of SQL statements"
+
+LDFLAGS_SL += $(filter -lm, $(LIBS))
+
+REGRESS_OPTS = --temp-config $(top_srcdir)/contrib/pg_stat_statements/pg_stat_statements.conf
+REGRESS = pg_stat_statements
+# Disabled because these tests require "shared_preload_libraries=pg_stat_statements",
+# which typical installcheck users do not have (e.g. buildfarm clients).
+NO_INSTALLCHECK = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/pg_stat_statements
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
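As the NO_INSTALLCHECK comment above notes, the regression tests require the module to be preloaded. For reference, a minimal sketch of enabling it on a running cluster (assumes superuser access; the setting only takes effect after a server restart):

ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_statements';
-- restart the server, then in each database that should expose the view:
CREATE EXTENSION pg_stat_statements;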
diff --git a/contrib/pg_stat_statements/expected/pg_stat_statements.out b/contrib/pg_stat_statements/expected/pg_stat_statements.out
new file mode 100644
index 0000000..865e890
--- /dev/null
+++ b/contrib/pg_stat_statements/expected/pg_stat_statements.out
@@ -0,0 +1,806 @@
+CREATE EXTENSION pg_stat_statements;
+--
+-- simple and compound statements
+--
+SET pg_stat_statements.track_utility = FALSE;
+SET pg_stat_statements.track_planning = TRUE;
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT 1 AS "int";
+ int
+-----
+ 1
+(1 row)
+
+SELECT 'hello'
+ -- multiline
+ AS "text";
+ text
+-------
+ hello
+(1 row)
+
+SELECT 'world' AS "text";
+ text
+-------
+ world
+(1 row)
+
+-- transaction
+BEGIN;
+SELECT 1 AS "int";
+ int
+-----
+ 1
+(1 row)
+
+SELECT 'hello' AS "text";
+ text
+-------
+ hello
+(1 row)
+
+COMMIT;
+-- compound transaction
+BEGIN \;
+SELECT 2.0 AS "float" \;
+SELECT 'world' AS "text" \;
+COMMIT;
+-- compound with empty statements and spurious leading spacing
+\;\; SELECT 3 + 3 \;\;\; SELECT ' ' || ' !' \;\; SELECT 1 + 4 \;;
+ ?column?
+----------
+ 5
+(1 row)
+
+-- non ;-terminated statements
+SELECT 1 + 1 + 1 AS "add" \gset
+SELECT :add + 1 + 1 AS "add" \;
+SELECT :add + 1 + 1 AS "add" \gset
+-- set operator
+SELECT 1 AS i UNION SELECT 2 ORDER BY i;
+ i
+---
+ 1
+ 2
+(2 rows)
+
+-- ? operator
+select '{"a":1, "b":2}'::jsonb ? 'b';
+ ?column?
+----------
+ t
+(1 row)
+
+-- cte
+WITH t(f) AS (
+ VALUES (1.0), (2.0)
+)
+ SELECT f FROM t ORDER BY f;
+ f
+-----
+ 1.0
+ 2.0
+(2 rows)
+
+-- prepared statement with parameter
+PREPARE pgss_test (int) AS SELECT $1, 'test' LIMIT 1;
+EXECUTE pgss_test(1);
+ ?column? | ?column?
+----------+----------
+ 1 | test
+(1 row)
+
+DEALLOCATE pgss_test;
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ PREPARE pgss_test (int) AS SELECT $1, $2 LIMIT $3 | 1 | 1
+ SELECT $1 +| 4 | 4
+ +| |
+ AS "text" | |
+ SELECT $1 + $2 | 2 | 2
+ SELECT $1 + $2 + $3 AS "add" | 3 | 3
+ SELECT $1 AS "float" | 1 | 1
+ SELECT $1 AS "int" | 2 | 2
+ SELECT $1 AS i UNION SELECT $2 ORDER BY i | 1 | 2
+ SELECT $1 || $2 | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 0 | 0
+ WITH t(f) AS ( +| 1 | 2
+ VALUES ($1), ($2) +| |
+ ) +| |
+ SELECT f FROM t ORDER BY f | |
+ select $1::jsonb ? $2 | 1 | 1
+(12 rows)
+
+--
+-- CRUD: INSERT SELECT UPDATE DELETE on test table
+--
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+-- utility "create table" should not be shown
+CREATE TEMP TABLE test (a int, b char(20));
+INSERT INTO test VALUES(generate_series(1, 10), 'aaa');
+UPDATE test SET b = 'bbb' WHERE a > 7;
+DELETE FROM test WHERE a > 9;
+-- explicit transaction
+BEGIN;
+UPDATE test SET b = '111' WHERE a = 1 ;
+COMMIT;
+BEGIN \;
+UPDATE test SET b = '222' WHERE a = 2 \;
+COMMIT ;
+UPDATE test SET b = '333' WHERE a = 3 \;
+UPDATE test SET b = '444' WHERE a = 4 ;
+BEGIN \;
+UPDATE test SET b = '555' WHERE a = 5 \;
+UPDATE test SET b = '666' WHERE a = 6 \;
+COMMIT ;
+-- many INSERT values
+INSERT INTO test (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c');
+-- SELECT with constants
+SELECT * FROM test WHERE a > 5 ORDER BY a ;
+ a | b
+---+----------------------
+ 6 | 666
+ 7 | aaa
+ 8 | bbb
+ 9 | bbb
+(4 rows)
+
+SELECT *
+ FROM test
+ WHERE a > 9
+ ORDER BY a ;
+ a | b
+---+---
+(0 rows)
+
+-- SELECT without constants
+SELECT * FROM test ORDER BY a;
+ a | b
+---+----------------------
+ 1 | a
+ 1 | 111
+ 2 | b
+ 2 | 222
+ 3 | c
+ 3 | 333
+ 4 | 444
+ 5 | 555
+ 6 | 666
+ 7 | aaa
+ 8 | bbb
+ 9 | bbb
+(12 rows)
+
+-- SELECT with IN clause
+SELECT * FROM test WHERE a IN (1, 2, 3, 4, 5);
+ a | b
+---+----------------------
+ 1 | 111
+ 2 | 222
+ 3 | 333
+ 4 | 444
+ 5 | 555
+ 1 | a
+ 2 | b
+ 3 | c
+(8 rows)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ DELETE FROM test WHERE a > $1 | 1 | 1
+ INSERT INTO test (a, b) VALUES ($1, $2), ($3, $4), ($5, $6) | 1 | 3
+ INSERT INTO test VALUES(generate_series($1, $2), $3) | 1 | 10
+ SELECT * FROM test ORDER BY a | 1 | 12
+ SELECT * FROM test WHERE a > $1 ORDER BY a | 2 | 4
+ SELECT * FROM test WHERE a IN ($1, $2, $3, $4, $5) | 1 | 8
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 0 | 0
+ UPDATE test SET b = $1 WHERE a = $2 | 6 | 6
+ UPDATE test SET b = $1 WHERE a > $2 | 1 | 3
+(10 rows)
+
+--
+-- INSERT, UPDATE, DELETE on test table to validate WAL generation metrics
+--
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+-- utility "create table" should not be shown
+CREATE TABLE pgss_test (a int, b char(20));
+INSERT INTO pgss_test VALUES(generate_series(1, 10), 'aaa');
+UPDATE pgss_test SET b = 'bbb' WHERE a > 7;
+DELETE FROM pgss_test WHERE a > 9;
+-- DROP test table
+SET pg_stat_statements.track_utility = TRUE;
+DROP TABLE pgss_test;
+SET pg_stat_statements.track_utility = FALSE;
+-- Check WAL is generated for the above statements
+SELECT query, calls, rows,
+wal_bytes > 0 as wal_bytes_generated,
+wal_records > 0 as wal_records_generated,
+wal_records = rows as wal_records_as_rows
+FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows | wal_bytes_generated | wal_records_generated | wal_records_as_rows
+-----------------------------------------------------------+-------+------+---------------------+-----------------------+---------------------
+ DELETE FROM pgss_test WHERE a > $1 | 1 | 1 | t | t | t
+ DROP TABLE pgss_test | 1 | 0 | t | t | f
+ INSERT INTO pgss_test VALUES(generate_series($1, $2), $3) | 1 | 10 | t | t | t
+ SELECT pg_stat_statements_reset() | 1 | 1 | f | f | f
+ SELECT query, calls, rows, +| 0 | 0 | f | f | t
+ wal_bytes > $1 as wal_bytes_generated, +| | | | |
+ wal_records > $2 as wal_records_generated, +| | | | |
+ wal_records = rows as wal_records_as_rows +| | | | |
+ FROM pg_stat_statements ORDER BY query COLLATE "C" | | | | |
+ SET pg_stat_statements.track_utility = FALSE | 1 | 0 | f | f | t
+ UPDATE pgss_test SET b = $1 WHERE a > $2 | 1 | 3 | t | t | t
+(7 rows)
+
+--
+-- pg_stat_statements.track = none
+--
+SET pg_stat_statements.track = 'none';
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT 1 AS "one";
+ one
+-----
+ 1
+(1 row)
+
+SELECT 1 + 1 AS "two";
+ two
+-----
+ 2
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+-------+-------+------
+(0 rows)
+
+--
+-- pg_stat_statements.track = top
+--
+SET pg_stat_statements.track = 'top';
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+DO LANGUAGE plpgsql $$
+BEGIN
+ -- this is a SELECT
+ PERFORM 'hello world'::TEXT;
+END;
+$$;
+-- PL/pgSQL function
+CREATE FUNCTION PLUS_TWO(i INTEGER) RETURNS INTEGER AS $$
+DECLARE
+ r INTEGER;
+BEGIN
+ SELECT (i + 1 + 1.0)::INTEGER INTO r;
+ RETURN r;
+END; $$ LANGUAGE plpgsql;
+SELECT PLUS_TWO(3);
+ plus_two
+----------
+ 5
+(1 row)
+
+SELECT PLUS_TWO(7);
+ plus_two
+----------
+ 9
+(1 row)
+
+-- SQL function --- use LIMIT to keep it from being inlined
+CREATE FUNCTION PLUS_ONE(i INTEGER) RETURNS INTEGER AS
+$$ SELECT (i + 1.0)::INTEGER LIMIT 1 $$ LANGUAGE SQL;
+SELECT PLUS_ONE(8);
+ plus_one
+----------
+ 9
+(1 row)
+
+SELECT PLUS_ONE(10);
+ plus_one
+----------
+ 11
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ SELECT $1::TEXT | 1 | 1
+ SELECT PLUS_ONE($1) | 2 | 2
+ SELECT PLUS_TWO($1) | 2 | 2
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 0 | 0
+(5 rows)
+
+--
+-- pg_stat_statements.track = all
+--
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+-- we drop and recreate the functions to avoid any caching funnies
+DROP FUNCTION PLUS_ONE(INTEGER);
+DROP FUNCTION PLUS_TWO(INTEGER);
+-- PL/pgSQL function
+CREATE FUNCTION PLUS_TWO(i INTEGER) RETURNS INTEGER AS $$
+DECLARE
+ r INTEGER;
+BEGIN
+ SELECT (i + 1 + 1.0)::INTEGER INTO r;
+ RETURN r;
+END; $$ LANGUAGE plpgsql;
+SELECT PLUS_TWO(-1);
+ plus_two
+----------
+ 1
+(1 row)
+
+SELECT PLUS_TWO(2);
+ plus_two
+----------
+ 4
+(1 row)
+
+-- SQL function --- use LIMIT to keep it from being inlined
+CREATE FUNCTION PLUS_ONE(i INTEGER) RETURNS INTEGER AS
+$$ SELECT (i + 1.0)::INTEGER LIMIT 1 $$ LANGUAGE SQL;
+SELECT PLUS_ONE(3);
+ plus_one
+----------
+ 4
+(1 row)
+
+SELECT PLUS_ONE(1);
+ plus_one
+----------
+ 2
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ SELECT (i + $2 + $3)::INTEGER | 2 | 2
+ SELECT (i + $2)::INTEGER LIMIT $3 | 2 | 2
+ SELECT PLUS_ONE($1) | 2 | 2
+ SELECT PLUS_TWO($1) | 2 | 2
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 0 | 0
+(6 rows)
+
+--
+-- queries with locking clauses
+--
+CREATE TABLE pgss_a (id integer PRIMARY KEY);
+CREATE TABLE pgss_b (id integer PRIMARY KEY, a_id integer REFERENCES pgss_a);
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+-- control query
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+-- test range tables
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a, pgss_b; -- matches plain "FOR UPDATE"
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b, pgss_a;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+-- test strengths
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR NO KEY UPDATE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR SHARE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR KEY SHARE;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+-- test wait policies
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE NOWAIT;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE SKIP LOCKED;
+ id | id | a_id
+----+----+------
+(0 rows)
+
+SELECT calls, query FROM pg_stat_statements ORDER BY query COLLATE "C";
+ calls | query
+-------+------------------------------------------------------------------------------------------
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR KEY SHARE
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR NO KEY UPDATE
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR SHARE
+ 2 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE NOWAIT
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b, pgss_a
+ 1 | SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE SKIP LOCKED
+ 0 | SELECT calls, query FROM pg_stat_statements ORDER BY query COLLATE "C"
+ 1 | SELECT pg_stat_statements_reset()
+(12 rows)
+
+DROP TABLE pgss_a, pgss_b CASCADE;
+--
+-- utility commands
+--
+SET pg_stat_statements.track_utility = TRUE;
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT 1;
+ ?column?
+----------
+ 1
+(1 row)
+
+CREATE INDEX test_b ON test(b);
+DROP TABLE test \;
+DROP TABLE IF EXISTS test \;
+DROP FUNCTION PLUS_ONE(INTEGER);
+NOTICE: table "test" does not exist, skipping
+DROP TABLE IF EXISTS test \;
+DROP TABLE IF EXISTS test \;
+DROP FUNCTION IF EXISTS PLUS_ONE(INTEGER);
+NOTICE: table "test" does not exist, skipping
+NOTICE: table "test" does not exist, skipping
+NOTICE: function plus_one(pg_catalog.int4) does not exist, skipping
+DROP FUNCTION PLUS_TWO(INTEGER);
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ CREATE INDEX test_b ON test(b) | 1 | 0
+ DROP FUNCTION IF EXISTS PLUS_ONE(INTEGER) | 1 | 0
+ DROP FUNCTION PLUS_ONE(INTEGER) | 1 | 0
+ DROP FUNCTION PLUS_TWO(INTEGER) | 1 | 0
+ DROP TABLE IF EXISTS test | 3 | 0
+ DROP TABLE test | 1 | 0
+ SELECT $1 | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 0 | 0
+(9 rows)
+
+--
+-- Track user activity and reset them
+--
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+CREATE ROLE regress_stats_user1;
+CREATE ROLE regress_stats_user2;
+SET ROLE regress_stats_user1;
+SELECT 1 AS "ONE";
+ ONE
+-----
+ 1
+(1 row)
+
+SELECT 1+1 AS "TWO";
+ TWO
+-----
+ 2
+(1 row)
+
+RESET ROLE;
+SET ROLE regress_stats_user2;
+SELECT 1 AS "ONE";
+ ONE
+-----
+ 1
+(1 row)
+
+SELECT 1+1 AS "TWO";
+ TWO
+-----
+ 2
+(1 row)
+
+RESET ROLE;
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 0 | 0
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(11 rows)
+
+--
+-- Don't reset anything if any of the parameters is NULL
+--
+SELECT pg_stat_statements_reset(NULL);
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 1 | 11
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(12 rows)
+
+--
+-- remove query ('SELECT $1+$2 AS "TWO"') executed by regress_stats_user2
+-- in the current_database
+--
+SELECT pg_stat_statements_reset(
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user2'),
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),
+ (SELECT s.queryid FROM pg_stat_statements AS s
+ WHERE s.query = 'SELECT $1+$2 AS "TWO"' LIMIT 1));
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+----------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1 AS "ONE" | 1 | 1
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset( +| 1 | 1
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = $1), +| |
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),+| |
+ (SELECT s.queryid FROM pg_stat_statements AS s +| |
+ WHERE s.query = $2 LIMIT $3)) | |
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 2 | 23
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(12 rows)
+
+--
+-- remove query ('SELECT $1 AS "ONE"') executed by two users
+--
+SELECT pg_stat_statements_reset(0,0,s.queryid)
+ FROM pg_stat_statements AS s WHERE s.query = 'SELECT $1 AS "ONE"';
+ pg_stat_statements_reset
+--------------------------
+
+
+(2 rows)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+----------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT $1+$2 AS "TWO" | 1 | 1
+ SELECT pg_stat_statements_reset( +| 1 | 1
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = $1), +| |
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),+| |
+ (SELECT s.queryid FROM pg_stat_statements AS s +| |
+ WHERE s.query = $2 LIMIT $3)) | |
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset($1,$2,s.queryid) +| 1 | 2
+ FROM pg_stat_statements AS s WHERE s.query = $3 | |
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 3 | 35
+ SET ROLE regress_stats_user1 | 1 | 0
+ SET ROLE regress_stats_user2 | 1 | 0
+(11 rows)
+
+--
+-- remove query of a user (regress_stats_user1)
+--
+SELECT pg_stat_statements_reset(r.oid)
+ FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user1';
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+----------------------------------------------------------------------------------+-------+------
+ CREATE ROLE regress_stats_user1 | 1 | 0
+ CREATE ROLE regress_stats_user2 | 1 | 0
+ RESET ROLE | 2 | 0
+ SELECT pg_stat_statements_reset( +| 1 | 1
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = $1), +| |
+ (SELECT d.oid FROM pg_database As d where datname = current_database()),+| |
+ (SELECT s.queryid FROM pg_stat_statements AS s +| |
+ WHERE s.query = $2 LIMIT $3)) | |
+ SELECT pg_stat_statements_reset($1) | 1 | 1
+ SELECT pg_stat_statements_reset($1,$2,s.queryid) +| 1 | 2
+ FROM pg_stat_statements AS s WHERE s.query = $3 | |
+ SELECT pg_stat_statements_reset() | 1 | 1
+ SELECT pg_stat_statements_reset(r.oid) +| 1 | 1
+ FROM pg_roles AS r WHERE r.rolname = $1 | |
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 4 | 46
+ SET ROLE regress_stats_user2 | 1 | 0
+(10 rows)
+
+--
+-- reset all
+--
+SELECT pg_stat_statements_reset(0,0,0);
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls | rows
+------------------------------------------------------------------------------+-------+------
+ SELECT pg_stat_statements_reset(0,0,0) | 1 | 1
+ SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C" | 0 | 0
+(2 rows)
+
+--
+-- cleanup
+--
+DROP ROLE regress_stats_user1;
+DROP ROLE regress_stats_user2;
+--
+-- [re]plan counting
+--
+SELECT pg_stat_statements_reset();
+ pg_stat_statements_reset
+--------------------------
+
+(1 row)
+
+CREATE TABLE test ();
+PREPARE prep1 AS SELECT COUNT(*) FROM test;
+EXECUTE prep1;
+ count
+-------
+ 0
+(1 row)
+
+EXECUTE prep1;
+ count
+-------
+ 0
+(1 row)
+
+EXECUTE prep1;
+ count
+-------
+ 0
+(1 row)
+
+ALTER TABLE test ADD COLUMN x int;
+EXECUTE prep1;
+ count
+-------
+ 0
+(1 row)
+
+SELECT 42;
+ ?column?
+----------
+ 42
+(1 row)
+
+SELECT 42;
+ ?column?
+----------
+ 42
+(1 row)
+
+SELECT 42;
+ ?column?
+----------
+ 42
+(1 row)
+
+SELECT query, plans, calls, rows FROM pg_stat_statements
+ WHERE query NOT LIKE 'PREPARE%' ORDER BY query COLLATE "C";
+ query | plans | calls | rows
+----------------------------------------------------------+-------+-------+------
+ ALTER TABLE test ADD COLUMN x int | 0 | 1 | 0
+ CREATE TABLE test () | 0 | 1 | 0
+ SELECT $1 | 3 | 3 | 3
+ SELECT pg_stat_statements_reset() | 0 | 1 | 1
+ SELECT query, plans, calls, rows FROM pg_stat_statements+| 1 | 0 | 0
+ WHERE query NOT LIKE $1 ORDER BY query COLLATE "C" | | |
+(5 rows)
+
+-- for the prepared statement we expect at least one replan, but cache
+-- invalidations could force more
+SELECT query, plans >= 2 AND plans <= calls AS plans_ok, calls, rows FROM pg_stat_statements
+ WHERE query LIKE 'PREPARE%' ORDER BY query COLLATE "C";
+ query | plans_ok | calls | rows
+--------------------------------------------+----------+-------+------
+ PREPARE prep1 AS SELECT COUNT(*) FROM test | t | 4 | 4
+(1 row)
+
+DROP EXTENSION pg_stat_statements;
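The regression script above exercises most of the view's columns. Outside the tests, a typical ad-hoc use of the extension is ranking statements by cumulative execution time; a minimal sketch using the 1.8 column names defined later in this patch:

SELECT query, calls, total_exec_time, mean_exec_time, rows
  FROM pg_stat_statements
 ORDER BY total_exec_time DESC
 LIMIT 5;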
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql b/contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql
new file mode 100644
index 0000000..5be281e
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql
@@ -0,0 +1,42 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.0--1.1.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.1'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements;
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements();
+
+/* Then we can drop them */
+DROP VIEW pg_stat_statements;
+DROP FUNCTION pg_stat_statements();
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements(
+ OUT userid oid,
+ OUT dbid oid,
+ OUT query text,
+ OUT calls int8,
+ OUT total_time float8,
+ OUT rows int8,
+ OUT shared_blks_hit int8,
+ OUT shared_blks_read int8,
+ OUT shared_blks_dirtied int8,
+ OUT shared_blks_written int8,
+ OUT local_blks_hit int8,
+ OUT local_blks_read int8,
+ OUT local_blks_dirtied int8,
+ OUT local_blks_written int8,
+ OUT temp_blks_read int8,
+ OUT temp_blks_written int8,
+ OUT blk_read_time float8,
+ OUT blk_write_time float8
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME'
+LANGUAGE C;
+
+CREATE VIEW pg_stat_statements AS
+ SELECT * FROM pg_stat_statements();
+
+GRANT SELECT ON pg_stat_statements TO PUBLIC;
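As the \echo guard indicates, this script is meant to run via ALTER EXTENSION, not psql. A sketch of applying the upgrade and confirming the installed version through the standard pg_extension catalog:

ALTER EXTENSION pg_stat_statements UPDATE TO '1.1';
SELECT extversion FROM pg_extension WHERE extname = 'pg_stat_statements';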
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql b/contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql
new file mode 100644
index 0000000..74ae438
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql
@@ -0,0 +1,43 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.1--1.2.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.2'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements;
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements();
+
+/* Then we can drop them */
+DROP VIEW pg_stat_statements;
+DROP FUNCTION pg_stat_statements();
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements(IN showtext boolean,
+ OUT userid oid,
+ OUT dbid oid,
+ OUT queryid bigint,
+ OUT query text,
+ OUT calls int8,
+ OUT total_time float8,
+ OUT rows int8,
+ OUT shared_blks_hit int8,
+ OUT shared_blks_read int8,
+ OUT shared_blks_dirtied int8,
+ OUT shared_blks_written int8,
+ OUT local_blks_hit int8,
+ OUT local_blks_read int8,
+ OUT local_blks_dirtied int8,
+ OUT local_blks_written int8,
+ OUT temp_blks_read int8,
+ OUT temp_blks_written int8,
+ OUT blk_read_time float8,
+ OUT blk_write_time float8
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'pg_stat_statements_1_2'
+LANGUAGE C STRICT VOLATILE;
+
+CREATE VIEW pg_stat_statements AS
+ SELECT * FROM pg_stat_statements(true);
+
+GRANT SELECT ON pg_stat_statements TO PUBLIC;
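This revision adds the queryid column and the showtext parameter; passing false is intended to suppress the (potentially large) query texts while still returning the counters. A minimal sketch calling the function directly rather than through the view:

-- query text is returned as NULL when showtext is false
SELECT queryid, calls, rows FROM pg_stat_statements(false);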
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql b/contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql
new file mode 100644
index 0000000..a56f151
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql
@@ -0,0 +1,47 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.2--1.3.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.3'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements;
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements(boolean);
+
+/* Then we can drop them */
+DROP VIEW pg_stat_statements;
+DROP FUNCTION pg_stat_statements(boolean);
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements(IN showtext boolean,
+ OUT userid oid,
+ OUT dbid oid,
+ OUT queryid bigint,
+ OUT query text,
+ OUT calls int8,
+ OUT total_time float8,
+ OUT min_time float8,
+ OUT max_time float8,
+ OUT mean_time float8,
+ OUT stddev_time float8,
+ OUT rows int8,
+ OUT shared_blks_hit int8,
+ OUT shared_blks_read int8,
+ OUT shared_blks_dirtied int8,
+ OUT shared_blks_written int8,
+ OUT local_blks_hit int8,
+ OUT local_blks_read int8,
+ OUT local_blks_dirtied int8,
+ OUT local_blks_written int8,
+ OUT temp_blks_read int8,
+ OUT temp_blks_written int8,
+ OUT blk_read_time float8,
+ OUT blk_write_time float8
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'pg_stat_statements_1_3'
+LANGUAGE C STRICT VOLATILE;
+
+CREATE VIEW pg_stat_statements AS
+ SELECT * FROM pg_stat_statements(true);
+
+GRANT SELECT ON pg_stat_statements TO PUBLIC;
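With the 1.3 timing columns, per-statement latency distributions become queryable. A sketch flagging statements with the most variable runtimes, using the column names defined above:

SELECT query, calls, mean_time, stddev_time
  FROM pg_stat_statements
 ORDER BY stddev_time DESC
 LIMIT 5;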
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.3--1.4.sql b/contrib/pg_stat_statements/pg_stat_statements--1.3--1.4.sql
new file mode 100644
index 0000000..ae70c1f
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.3--1.4.sql
@@ -0,0 +1,7 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.3--1.4.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.4'" to load this file. \quit
+
+ALTER FUNCTION pg_stat_statements_reset() PARALLEL SAFE;
+ALTER FUNCTION pg_stat_statements(boolean) PARALLEL SAFE;
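The effect of these ALTER FUNCTION calls can be checked in pg_proc, where proparallel should read 's' (safe) afterwards; a minimal sketch:

SELECT proname, proparallel
  FROM pg_proc
 WHERE proname IN ('pg_stat_statements', 'pg_stat_statements_reset');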
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.4--1.5.sql b/contrib/pg_stat_statements/pg_stat_statements--1.4--1.5.sql
new file mode 100644
index 0000000..9c76122
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.4--1.5.sql
@@ -0,0 +1,6 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.4--1.5.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.5'" to load this file. \quit
+
+GRANT EXECUTE ON FUNCTION pg_stat_statements_reset() TO pg_read_all_stats;
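After this upgrade, membership in pg_read_all_stats suffices to call the reset function (a grant the 1.6 script below reverts). A sketch granting that membership, where monitoring_role is a placeholder name:

GRANT pg_read_all_stats TO monitoring_role;  -- monitoring_role is a placeholder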
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.4.sql b/contrib/pg_stat_statements/pg_stat_statements--1.4.sql
new file mode 100644
index 0000000..58cdf60
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.4.sql
@@ -0,0 +1,48 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.4.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION pg_stat_statements" to load this file. \quit
+
+-- Register functions.
+CREATE FUNCTION pg_stat_statements_reset()
+RETURNS void
+AS 'MODULE_PATHNAME'
+LANGUAGE C PARALLEL SAFE;
+
+CREATE FUNCTION pg_stat_statements(IN showtext boolean,
+ OUT userid oid,
+ OUT dbid oid,
+ OUT queryid bigint,
+ OUT query text,
+ OUT calls int8,
+ OUT total_time float8,
+ OUT min_time float8,
+ OUT max_time float8,
+ OUT mean_time float8,
+ OUT stddev_time float8,
+ OUT rows int8,
+ OUT shared_blks_hit int8,
+ OUT shared_blks_read int8,
+ OUT shared_blks_dirtied int8,
+ OUT shared_blks_written int8,
+ OUT local_blks_hit int8,
+ OUT local_blks_read int8,
+ OUT local_blks_dirtied int8,
+ OUT local_blks_written int8,
+ OUT temp_blks_read int8,
+ OUT temp_blks_written int8,
+ OUT blk_read_time float8,
+ OUT blk_write_time float8
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'pg_stat_statements_1_3'
+LANGUAGE C STRICT VOLATILE PARALLEL SAFE;
+
+-- Register a view on the function for ease of use.
+CREATE VIEW pg_stat_statements AS
+ SELECT * FROM pg_stat_statements(true);
+
+GRANT SELECT ON pg_stat_statements TO PUBLIC;
+
+-- Don't want this to be available to non-superusers.
+REVOKE ALL ON FUNCTION pg_stat_statements_reset() FROM PUBLIC;
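This base script is what CREATE EXTENSION installs for version 1.4. A sketch of that era's view in use, with the pre-1.8 total_time column:

CREATE EXTENSION pg_stat_statements;
SELECT query, calls, total_time, rows
  FROM pg_stat_statements
 ORDER BY total_time DESC
 LIMIT 5;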
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql b/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql
new file mode 100644
index 0000000..4f8c7f7
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql
@@ -0,0 +1,7 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.5--1.6.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.6'" to load this file. \quit
+
+-- Execution is only allowed for superusers, fixing issue with 1.5.
+REVOKE EXECUTE ON FUNCTION pg_stat_statements_reset() FROM pg_read_all_stats;
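With the 1.6 revoke in place, non-superusers need an explicit grant to reset statistics. A sketch, assuming a hypothetical admin role:

GRANT EXECUTE ON FUNCTION pg_stat_statements_reset() TO stats_admin;  -- stats_admin is a placeholder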
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql
new file mode 100644
index 0000000..6fc3fed
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql
@@ -0,0 +1,22 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.7'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements_reset();
+
+/* Then we can drop them */
+DROP FUNCTION pg_stat_statements_reset();
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements_reset(IN userid Oid DEFAULT 0,
+ IN dbid Oid DEFAULT 0,
+ IN queryid bigint DEFAULT 0
+)
+RETURNS void
+AS 'MODULE_PATHNAME', 'pg_stat_statements_reset_1_7'
+LANGUAGE C STRICT PARALLEL SAFE;
+
+-- Don't want this to be available to non-superusers.
+REVOKE ALL ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint) FROM PUBLIC;
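The defaults of 0 mean "any", so a bare pg_stat_statements_reset() still clears everything, while supplying OIDs narrows the reset. A sketch that clears only the current user's entries in the current database, mirroring the regression tests above:

SELECT pg_stat_statements_reset(
    (SELECT oid FROM pg_roles WHERE rolname = current_user),
    (SELECT oid FROM pg_database WHERE datname = current_database()),
    0);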
diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.7--1.8.sql b/contrib/pg_stat_statements/pg_stat_statements--1.7--1.8.sql
new file mode 100644
index 0000000..0f63f08
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements--1.7--1.8.sql
@@ -0,0 +1,56 @@
+/* contrib/pg_stat_statements/pg_stat_statements--1.7--1.8.sql */
+
+-- complain if script is sourced in psql, rather than via ALTER EXTENSION
+\echo Use "ALTER EXTENSION pg_stat_statements UPDATE TO '1.8'" to load this file. \quit
+
+/* First we have to remove them from the extension */
+ALTER EXTENSION pg_stat_statements DROP VIEW pg_stat_statements;
+ALTER EXTENSION pg_stat_statements DROP FUNCTION pg_stat_statements(boolean);
+
+/* Then we can drop them */
+DROP VIEW pg_stat_statements;
+DROP FUNCTION pg_stat_statements(boolean);
+
+/* Now redefine */
+CREATE FUNCTION pg_stat_statements(IN showtext boolean,
+ OUT userid oid,
+ OUT dbid oid,
+ OUT queryid bigint,
+ OUT query text,
+ OUT plans int8,
+ OUT total_plan_time float8,
+ OUT min_plan_time float8,
+ OUT max_plan_time float8,
+ OUT mean_plan_time float8,
+ OUT stddev_plan_time float8,
+ OUT calls int8,
+ OUT total_exec_time float8,
+ OUT min_exec_time float8,
+ OUT max_exec_time float8,
+ OUT mean_exec_time float8,
+ OUT stddev_exec_time float8,
+ OUT rows int8,
+ OUT shared_blks_hit int8,
+ OUT shared_blks_read int8,
+ OUT shared_blks_dirtied int8,
+ OUT shared_blks_written int8,
+ OUT local_blks_hit int8,
+ OUT local_blks_read int8,
+ OUT local_blks_dirtied int8,
+ OUT local_blks_written int8,
+ OUT temp_blks_read int8,
+ OUT temp_blks_written int8,
+ OUT blk_read_time float8,
+ OUT blk_write_time float8,
+ OUT wal_records int8,
+ OUT wal_fpi int8,
+ OUT wal_bytes numeric
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'pg_stat_statements_1_8'
+LANGUAGE C STRICT VOLATILE PARALLEL SAFE;
+
+CREATE VIEW pg_stat_statements AS
+ SELECT * FROM pg_stat_statements(true);
+
+GRANT SELECT ON pg_stat_statements TO PUBLIC;
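Version 1.8 splits the timing counters into planning and execution sets and adds WAL usage. A sketch ranking statements by WAL volume with the new columns:

SELECT query, calls, wal_records, wal_fpi, wal_bytes
  FROM pg_stat_statements
 ORDER BY wal_bytes DESC
 LIMIT 5;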
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
new file mode 100644
index 0000000..14cad19
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -0,0 +1,3447 @@
+/*-------------------------------------------------------------------------
+ *
+ * pg_stat_statements.c
+ * Track statement planning and execution times as well as resource
+ * usage across a whole database cluster.
+ *
+ * Execution costs are totaled for each distinct source query, and kept in
+ * a shared hashtable. (We track only as many distinct queries as will fit
+ * in the designated amount of shared memory.)
+ *
+ * As of Postgres 9.2, this module normalizes query entries. Normalization
+ * is a process whereby similar queries, typically differing only in their
+ * constants (though the exact rules are somewhat more subtle than that), are
+ * recognized as equivalent, and are tracked as a single entry. This is
+ * particularly useful for non-prepared queries.
+ *
+ * Normalization is implemented by fingerprinting queries, selectively
+ * serializing those fields of each query tree's nodes that are judged to be
+ * essential to the query. This is referred to as a query jumble. This is
+ * distinct from a regular serialization in that various extraneous
+ * information is ignored as irrelevant or not essential to the query, such
+ * as the collations of Vars and, most notably, the values of constants.
+ *
+ * This jumble is acquired at the end of parse analysis of each query, and
+ * a 64-bit hash of it is stored into the query's Query.queryId field.
+ * The server then copies this value around, making it available in plan
+ * tree(s) generated from the query. The executor can then use this value
+ * to blame query costs on the proper queryId.
+ *
+ * To facilitate presenting entries to users, we create "representative" query
+ * strings in which constants are replaced with parameter symbols ($n), to
+ * make it clearer what a normalized entry can represent. To save on shared
+ * memory, and to avoid having to truncate oversized query strings, we store
+ * these strings in a temporary external query-texts file. Offsets into this
+ * file are kept in shared memory.
+ *
+ * Note about locking issues: to create or delete an entry in the shared
+ * hashtable, one must hold pgss->lock exclusively. Modifying any field
+ * in an entry except the counters requires the same. To look up an entry,
+ * one must hold the lock shared. To read or update the counters within
+ * an entry, one must hold the lock shared or exclusive (so the entry doesn't
+ * disappear!) and also take the entry's mutex spinlock.
+ * The shared state variable pgss->extent (the next free spot in the external
+ * query-text file) should be accessed only while holding either the
+ * pgss->mutex spinlock, or exclusive lock on pgss->lock. We use the mutex to
+ * allow reserving file space while holding only shared lock on pgss->lock.
+ * Rewriting the entire external query-text file, eg for garbage collection,
+ * requires holding pgss->lock exclusively; this allows individual entries
+ * in the file to be read or written while holding only shared lock.
+ *
+ *
+ * Copyright (c) 2008-2020, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * contrib/pg_stat_statements/pg_stat_statements.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "catalog/pg_authid.h"
+#include "common/hashfn.h"
+#include "executor/instrument.h"
+#include "funcapi.h"
+#include "mb/pg_wchar.h"
+#include "miscadmin.h"
+#include "optimizer/planner.h"
+#include "parser/analyze.h"
+#include "parser/parsetree.h"
+#include "parser/scanner.h"
+#include "parser/scansup.h"
+#include "pgstat.h"
+#include "storage/fd.h"
+#include "storage/ipc.h"
+#include "storage/spin.h"
+#include "tcop/utility.h"
+#include "utils/acl.h"
+#include "utils/builtins.h"
+#include "utils/memutils.h"
+
+PG_MODULE_MAGIC;
+
+/* Location of permanent stats file (valid when database is shut down) */
+#define PGSS_DUMP_FILE PGSTAT_STAT_PERMANENT_DIRECTORY "/pg_stat_statements.stat"
+
+/*
+ * Location of external query text file. We don't keep it in the core
+ * system's stats_temp_directory. The core system can safely use that GUC
+ * setting, because the statistics collector temp file paths are set only once
+ * as part of changing the GUC, but pg_stat_statements has no way of avoiding
+ * race conditions. Besides, we only expect modest, infrequent I/O for query
+ * strings, so placing the file on a faster filesystem is not compelling.
+ */
+#define PGSS_TEXT_FILE PG_STAT_TMP_DIR "/pgss_query_texts.stat"
+
+/* Magic number identifying the stats file format */
+static const uint32 PGSS_FILE_HEADER = 0x20171004;
+
+/* PostgreSQL major version number, changes in which invalidate all entries */
+static const uint32 PGSS_PG_MAJOR_VERSION = PG_VERSION_NUM / 100;
+
+/* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? */
+#define USAGE_EXEC(duration) (1.0)
+#define USAGE_INIT (1.0) /* including initial planning */
+#define ASSUMED_MEDIAN_INIT (10.0) /* initial assumed median usage */
+#define ASSUMED_LENGTH_INIT 1024 /* initial assumed mean query length */
+#define USAGE_DECREASE_FACTOR (0.99) /* decreased every entry_dealloc */
+#define STICKY_DECREASE_FACTOR (0.50) /* factor for sticky entries */
+#define USAGE_DEALLOC_PERCENT 5 /* free this % of entries at once */
+#define IS_STICKY(c) ((c.calls[PGSS_PLAN] + c.calls[PGSS_EXEC]) == 0)
+
+#define JUMBLE_SIZE 1024 /* query serialization buffer size */
+
+/*
+ * Extension version number, for supporting older extension versions' objects
+ */
+typedef enum pgssVersion
+{
+ PGSS_V1_0 = 0,
+ PGSS_V1_1,
+ PGSS_V1_2,
+ PGSS_V1_3,
+ PGSS_V1_8
+} pgssVersion;
+
+typedef enum pgssStoreKind
+{
+ PGSS_INVALID = -1,
+
+ /*
+ * PGSS_PLAN and PGSS_EXEC must be respectively 0 and 1 as they're used to
+ * reference the underlying values in the arrays in the Counters struct,
+ * and this order is required in pg_stat_statements_internal().
+ */
+ PGSS_PLAN = 0,
+ PGSS_EXEC,
+
+ PGSS_NUMKIND /* Must be last value of this enum */
+} pgssStoreKind;
+
+/*
+ * Hashtable key that defines the identity of a hashtable entry. We separate
+ * queries by user and by database even if they are otherwise identical.
+ *
+ * Right now, this structure contains no padding. If you add any, make sure
+ * to teach pgss_store() to zero the padding bytes. Otherwise, things will
+ * break, because pgss_hash is created using HASH_BLOBS, and thus tag_hash
+ * is used to hash this.
+ */
+typedef struct pgssHashKey
+{
+ Oid userid; /* user OID */
+ Oid dbid; /* database OID */
+ uint64 queryid; /* query identifier */
+} pgssHashKey;
+
+/*
+ * The actual stats counters kept within pgssEntry.
+ */
+typedef struct Counters
+{
+ int64 calls[PGSS_NUMKIND]; /* # of times planned/executed */
+ double total_time[PGSS_NUMKIND]; /* total planning/execution time,
+ * in msec */
+ double min_time[PGSS_NUMKIND]; /* minimum planning/execution time in
+ * msec */
+ double max_time[PGSS_NUMKIND]; /* maximum planning/execution time in
+ * msec */
+ double mean_time[PGSS_NUMKIND]; /* mean planning/execution time in
+ * msec */
+ double sum_var_time[PGSS_NUMKIND]; /* sum of variances in
+ * planning/execution time in msec */
+ int64 rows; /* total # of retrieved or affected rows */
+ int64 shared_blks_hit; /* # of shared buffer hits */
+ int64 shared_blks_read; /* # of shared disk blocks read */
+ int64 shared_blks_dirtied; /* # of shared disk blocks dirtied */
+ int64 shared_blks_written; /* # of shared disk blocks written */
+ int64 local_blks_hit; /* # of local buffer hits */
+ int64 local_blks_read; /* # of local disk blocks read */
+ int64 local_blks_dirtied; /* # of local disk blocks dirtied */
+ int64 local_blks_written; /* # of local disk blocks written */
+ int64 temp_blks_read; /* # of temp blocks read */
+ int64 temp_blks_written; /* # of temp blocks written */
+ double blk_read_time; /* time spent reading, in msec */
+ double blk_write_time; /* time spent writing, in msec */
+ double usage; /* usage factor */
+ int64 wal_records; /* # of WAL records generated */
+ int64 wal_fpi; /* # of WAL full page images generated */
+ uint64 wal_bytes; /* total amount of WAL bytes generated */
+} Counters;
+
+/*
+ * Statistics per statement
+ *
+ * Note: in event of a failure in garbage collection of the query text file,
+ * we reset query_offset to zero and query_len to -1. This will be seen as
+ * an invalid state by qtext_fetch().
+ */
+typedef struct pgssEntry
+{
+ pgssHashKey key; /* hash key of entry - MUST BE FIRST */
+ Counters counters; /* the statistics for this query */
+ Size query_offset; /* query text offset in external file */
+ int query_len; /* # of valid bytes in query string, or -1 */
+ int encoding; /* query text encoding */
+ slock_t mutex; /* protects the counters only */
+} pgssEntry;
+
+/*
+ * Global shared state
+ */
+typedef struct pgssSharedState
+{
+ LWLock *lock; /* protects hashtable search/modification */
+ double cur_median_usage; /* current median usage in hashtable */
+ Size mean_query_len; /* current mean entry text length */
+ slock_t mutex; /* protects following fields only: */
+ Size extent; /* current extent of query file */
+ int n_writers; /* number of active writers to query file */
+ int gc_count; /* query file garbage collection cycle count */
+} pgssSharedState;
+
+/*
+ * Struct for tracking locations/lengths of constants during normalization
+ */
+typedef struct pgssLocationLen
+{
+ int location; /* start offset in query text */
+ int length; /* length in bytes, or -1 to ignore */
+} pgssLocationLen;
+
+/*
+ * Working state for computing a query jumble and producing a normalized
+ * query string
+ */
+typedef struct pgssJumbleState
+{
+ /* Jumble of current query tree */
+ unsigned char *jumble;
+
+ /* Number of bytes used in jumble[] */
+ Size jumble_len;
+
+ /* Array of locations of constants that should be removed */
+ pgssLocationLen *clocations;
+
+ /* Allocated length of clocations array */
+ int clocations_buf_size;
+
+ /* Current number of valid entries in clocations array */
+ int clocations_count;
+
+ /* highest Param id we've seen, in order to start normalization correctly */
+ int highest_extern_param_id;
+} pgssJumbleState;
+
+/*---- Local variables ----*/
+
+/* Current nesting depth of ExecutorRun+ProcessUtility calls */
+static int exec_nested_level = 0;
+
+/* Current nesting depth of planner calls */
+static int plan_nested_level = 0;
+
+/* Saved hook values in case of unload */
+static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
+static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;
+static planner_hook_type prev_planner_hook = NULL;
+static ExecutorStart_hook_type prev_ExecutorStart = NULL;
+static ExecutorRun_hook_type prev_ExecutorRun = NULL;
+static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
+static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
+static ProcessUtility_hook_type prev_ProcessUtility = NULL;
+
+/* Links to shared memory state */
+static pgssSharedState *pgss = NULL;
+static HTAB *pgss_hash = NULL;
+
+/*---- GUC variables ----*/
+
+typedef enum
+{
+ PGSS_TRACK_NONE, /* track no statements */
+ PGSS_TRACK_TOP, /* only top level statements */
+ PGSS_TRACK_ALL /* all statements, including nested ones */
+} PGSSTrackLevel;
+
+static const struct config_enum_entry track_options[] =
+{
+ {"none", PGSS_TRACK_NONE, false},
+ {"top", PGSS_TRACK_TOP, false},
+ {"all", PGSS_TRACK_ALL, false},
+ {NULL, 0, false}
+};
+
+static int pgss_max; /* max # statements to track */
+static int pgss_track; /* tracking level */
+static bool pgss_track_utility; /* whether to track utility commands */
+static bool pgss_track_planning; /* whether to track planning duration */
+static bool pgss_save; /* whether to save stats across shutdown */
+
+
+#define pgss_enabled(level) \
+ (pgss_track == PGSS_TRACK_ALL || \
+ (pgss_track == PGSS_TRACK_TOP && (level) == 0))
+
+#define record_gc_qtexts() \
+ do { \
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss; \
+ SpinLockAcquire(&s->mutex); \
+ s->gc_count++; \
+ SpinLockRelease(&s->mutex); \
+ } while(0)
+
+/*---- Function declarations ----*/
+
+void _PG_init(void);
+void _PG_fini(void);
+
+PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
+PG_FUNCTION_INFO_V1(pg_stat_statements_reset_1_7);
+PG_FUNCTION_INFO_V1(pg_stat_statements_1_2);
+PG_FUNCTION_INFO_V1(pg_stat_statements_1_3);
+PG_FUNCTION_INFO_V1(pg_stat_statements_1_8);
+PG_FUNCTION_INFO_V1(pg_stat_statements);
+
+static void pgss_shmem_startup(void);
+static void pgss_shmem_shutdown(int code, Datum arg);
+static void pgss_post_parse_analyze(ParseState *pstate, Query *query);
+static PlannedStmt *pgss_planner(Query *parse,
+ const char *query_string,
+ int cursorOptions,
+ ParamListInfo boundParams);
+static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
+static void pgss_ExecutorRun(QueryDesc *queryDesc,
+ ScanDirection direction,
+ uint64 count, bool execute_once);
+static void pgss_ExecutorFinish(QueryDesc *queryDesc);
+static void pgss_ExecutorEnd(QueryDesc *queryDesc);
+static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
+ ProcessUtilityContext context, ParamListInfo params,
+ QueryEnvironment *queryEnv,
+ DestReceiver *dest, QueryCompletion *qc);
+static uint64 pgss_hash_string(const char *str, int len);
+static void pgss_store(const char *query, uint64 queryId,
+ int query_location, int query_len,
+ pgssStoreKind kind,
+ double total_time, uint64 rows,
+ const BufferUsage *bufusage,
+ const WalUsage *walusage,
+ pgssJumbleState *jstate);
+static void pg_stat_statements_internal(FunctionCallInfo fcinfo,
+ pgssVersion api_version,
+ bool showtext);
+static Size pgss_memsize(void);
+static pgssEntry *entry_alloc(pgssHashKey *key, Size query_offset, int query_len,
+ int encoding, bool sticky);
+static void entry_dealloc(void);
+static bool qtext_store(const char *query, int query_len,
+ Size *query_offset, int *gc_count);
+static char *qtext_load_file(Size *buffer_size);
+static char *qtext_fetch(Size query_offset, int query_len,
+ char *buffer, Size buffer_size);
+static bool need_gc_qtexts(void);
+static void gc_qtexts(void);
+static void entry_reset(Oid userid, Oid dbid, uint64 queryid);
+static void AppendJumble(pgssJumbleState *jstate,
+ const unsigned char *item, Size size);
+static void JumbleQuery(pgssJumbleState *jstate, Query *query);
+static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
+static void JumbleRowMarks(pgssJumbleState *jstate, List *rowMarks);
+static void JumbleExpr(pgssJumbleState *jstate, Node *node);
+static void RecordConstLocation(pgssJumbleState *jstate, int location);
+static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
+ int query_loc, int *query_len_p, int encoding);
+static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query,
+ int query_loc);
+static int comp_location(const void *a, const void *b);
+
+
+/*
+ * Module load callback
+ */
+void
+_PG_init(void)
+{
+ /*
+ * In order to create our shared memory area, we have to be loaded via
+ * shared_preload_libraries. If not, fall out without hooking into any of
+ * the main system. (We don't throw error here because it seems useful to
+ * allow the pg_stat_statements functions to be created even when the
+ * module isn't active. The functions must protect themselves against
+ * being called then, however.)
+ */
+ if (!process_shared_preload_libraries_in_progress)
+ return;
+
+ /*
+ * Define (or redefine) custom GUC variables.
+ */
+ DefineCustomIntVariable("pg_stat_statements.max",
+ "Sets the maximum number of statements tracked by pg_stat_statements.",
+ NULL,
+ &pgss_max,
+ 5000,
+ 100,
+ INT_MAX,
+ PGC_POSTMASTER,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomEnumVariable("pg_stat_statements.track",
+ "Selects which statements are tracked by pg_stat_statements.",
+ NULL,
+ &pgss_track,
+ PGSS_TRACK_TOP,
+ track_options,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomBoolVariable("pg_stat_statements.track_utility",
+ "Selects whether utility commands are tracked by pg_stat_statements.",
+ NULL,
+ &pgss_track_utility,
+ true,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomBoolVariable("pg_stat_statements.track_planning",
+ "Selects whether planning duration is tracked by pg_stat_statements.",
+ NULL,
+ &pgss_track_planning,
+ false,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomBoolVariable("pg_stat_statements.save",
+ "Save pg_stat_statements statistics across server shutdowns.",
+ NULL,
+ &pgss_save,
+ true,
+ PGC_SIGHUP,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ EmitWarningsOnPlaceholders("pg_stat_statements");
+
+ /*
+ * Request additional shared resources. (These are no-ops if we're not in
+ * the postmaster process.) We'll allocate or attach to the shared
+ * resources in pgss_shmem_startup().
+ */
+ RequestAddinShmemSpace(pgss_memsize());
+ RequestNamedLWLockTranche("pg_stat_statements", 1);
+
+ /*
+ * Install hooks.
+ */
+ prev_shmem_startup_hook = shmem_startup_hook;
+ shmem_startup_hook = pgss_shmem_startup;
+ prev_post_parse_analyze_hook = post_parse_analyze_hook;
+ post_parse_analyze_hook = pgss_post_parse_analyze;
+ prev_planner_hook = planner_hook;
+ planner_hook = pgss_planner;
+ prev_ExecutorStart = ExecutorStart_hook;
+ ExecutorStart_hook = pgss_ExecutorStart;
+ prev_ExecutorRun = ExecutorRun_hook;
+ ExecutorRun_hook = pgss_ExecutorRun;
+ prev_ExecutorFinish = ExecutorFinish_hook;
+ ExecutorFinish_hook = pgss_ExecutorFinish;
+ prev_ExecutorEnd = ExecutorEnd_hook;
+ ExecutorEnd_hook = pgss_ExecutorEnd;
+ prev_ProcessUtility = ProcessUtility_hook;
+ ProcessUtility_hook = pgss_ProcessUtility;
+}
+
+/*
+ * Module unload callback
+ */
+void
+_PG_fini(void)
+{
+ /* Uninstall hooks. */
+ shmem_startup_hook = prev_shmem_startup_hook;
+ post_parse_analyze_hook = prev_post_parse_analyze_hook;
+ planner_hook = prev_planner_hook;
+ ExecutorStart_hook = prev_ExecutorStart;
+ ExecutorRun_hook = prev_ExecutorRun;
+ ExecutorFinish_hook = prev_ExecutorFinish;
+ ExecutorEnd_hook = prev_ExecutorEnd;
+ ProcessUtility_hook = prev_ProcessUtility;
+}
+
+/*
+ * shmem_startup hook: allocate or attach to shared memory,
+ * then load any pre-existing statistics from file.
+ * Also create and load the query-texts file, which is expected to exist
+ * (even if empty) while the module is enabled.
+ */
+static void
+pgss_shmem_startup(void)
+{
+ bool found;
+ HASHCTL info;
+ FILE *file = NULL;
+ FILE *qfile = NULL;
+ uint32 header;
+ int32 num;
+ int32 pgver;
+ int32 i;
+ int buffer_size;
+ char *buffer = NULL;
+
+ if (prev_shmem_startup_hook)
+ prev_shmem_startup_hook();
+
+ /* reset in case this is a restart within the postmaster */
+ pgss = NULL;
+ pgss_hash = NULL;
+
+ /*
+ * Create or attach to the shared memory state, including hash table
+ */
+ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
+
+ pgss = ShmemInitStruct("pg_stat_statements",
+ sizeof(pgssSharedState),
+ &found);
+
+ if (!found)
+ {
+ /* First time through ... */
+ pgss->lock = &(GetNamedLWLockTranche("pg_stat_statements"))->lock;
+ pgss->cur_median_usage = ASSUMED_MEDIAN_INIT;
+ pgss->mean_query_len = ASSUMED_LENGTH_INIT;
+ SpinLockInit(&pgss->mutex);
+ pgss->extent = 0;
+ pgss->n_writers = 0;
+ pgss->gc_count = 0;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.keysize = sizeof(pgssHashKey);
+ info.entrysize = sizeof(pgssEntry);
+ pgss_hash = ShmemInitHash("pg_stat_statements hash",
+ pgss_max, pgss_max,
+ &info,
+ HASH_ELEM | HASH_BLOBS);
+
+ LWLockRelease(AddinShmemInitLock);
+
+ /*
+ * If we're in the postmaster (or a standalone backend...), set up a shmem
+ * exit hook to dump the statistics to disk.
+ */
+ if (!IsUnderPostmaster)
+ on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
+
+ /*
+ * Done if some other process already completed our initialization.
+ */
+ if (found)
+ return;
+
+ /*
+ * Note: we don't bother with locks here, because there should be no other
+ * processes running when this code is reached.
+ */
+
+ /* Unlink query text file possibly left over from crash */
+ unlink(PGSS_TEXT_FILE);
+
+ /* Allocate new query text temp file */
+ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
+ if (qfile == NULL)
+ goto write_error;
+
+ /*
+ * If we were told not to load old statistics, we're done. (Note we do
+ * not try to unlink any old dump file in this case. This seems a bit
+ * questionable but it's the historical behavior.)
+ */
+ if (!pgss_save)
+ {
+ FreeFile(qfile);
+ return;
+ }
+
+ /*
+ * Attempt to load old statistics from the dump file.
+ */
+ file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
+ if (file == NULL)
+ {
+ if (errno != ENOENT)
+ goto read_error;
+ /* No existing persisted stats file, so we're done */
+ FreeFile(qfile);
+ return;
+ }
+
+ buffer_size = 2048;
+ buffer = (char *) palloc(buffer_size);
+
+ if (fread(&header, sizeof(uint32), 1, file) != 1 ||
+ fread(&pgver, sizeof(uint32), 1, file) != 1 ||
+ fread(&num, sizeof(int32), 1, file) != 1)
+ goto read_error;
+
+ if (header != PGSS_FILE_HEADER ||
+ pgver != PGSS_PG_MAJOR_VERSION)
+ goto data_error;
+
+ for (i = 0; i < num; i++)
+ {
+ pgssEntry temp;
+ pgssEntry *entry;
+ Size query_offset;
+
+ if (fread(&temp, sizeof(pgssEntry), 1, file) != 1)
+ goto read_error;
+
+ /* Encoding is the only field we can easily sanity-check */
+ if (!PG_VALID_BE_ENCODING(temp.encoding))
+ goto data_error;
+
+ /* Resize buffer as needed */
+ if (temp.query_len >= buffer_size)
+ {
+ buffer_size = Max(buffer_size * 2, temp.query_len + 1);
+ buffer = repalloc(buffer, buffer_size);
+ }
+
+ if (fread(buffer, 1, temp.query_len + 1, file) != temp.query_len + 1)
+ goto read_error;
+
+ /* Should have a trailing null, but let's make sure */
+ buffer[temp.query_len] = '\0';
+
+ /* Skip loading "sticky" entries */
+ if (IS_STICKY(temp.counters))
+ continue;
+
+ /* Store the query text */
+ query_offset = pgss->extent;
+ if (fwrite(buffer, 1, temp.query_len + 1, qfile) != temp.query_len + 1)
+ goto write_error;
+ pgss->extent += temp.query_len + 1;
+
+ /* make the hashtable entry (discards old entries if too many) */
+ entry = entry_alloc(&temp.key, query_offset, temp.query_len,
+ temp.encoding,
+ false);
+
+ /* copy in the actual stats */
+ entry->counters = temp.counters;
+ }
+
+ pfree(buffer);
+ FreeFile(file);
+ FreeFile(qfile);
+
+ /*
+ * Remove the persisted stats file so it's not included in
+ * backups/replication standbys, etc. A new file will be written on next
+ * shutdown.
+ *
+ * Note: it's okay if the PGSS_TEXT_FILE is included in a basebackup,
+ * because we remove that file on startup; it acts inversely to
+ * PGSS_DUMP_FILE, in that it is only supposed to be around when the
+ * server is running, whereas PGSS_DUMP_FILE is only supposed to be around
+ * when the server is not running. Leaving the file creates no danger of
+ * a newly restored database having a spurious record of execution costs,
+ * which is what we're really concerned about here.
+ */
+ unlink(PGSS_DUMP_FILE);
+
+ return;
+
+read_error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m",
+ PGSS_DUMP_FILE)));
+ goto fail;
+data_error:
+ ereport(LOG,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("ignoring invalid data in file \"%s\"",
+ PGSS_DUMP_FILE)));
+ goto fail;
+write_error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+fail:
+ if (buffer)
+ pfree(buffer);
+ if (file)
+ FreeFile(file);
+ if (qfile)
+ FreeFile(qfile);
+ /* If possible, throw away the bogus file; ignore any error */
+ unlink(PGSS_DUMP_FILE);
+
+ /*
+ * Don't unlink PGSS_TEXT_FILE here; it should always be around while the
+ * server is running with pg_stat_statements enabled
+ */
+}
+
+/*
+ * shmem_shutdown hook: Dump statistics into file.
+ *
+ * Note: we don't bother with acquiring lock, because there should be no
+ * other processes running when this is called.
+ */
+static void
+pgss_shmem_shutdown(int code, Datum arg)
+{
+ FILE *file;
+ char *qbuffer = NULL;
+ Size qbuffer_size = 0;
+ HASH_SEQ_STATUS hash_seq;
+ int32 num_entries;
+ pgssEntry *entry;
+
+ /* Don't try to dump during a crash. */
+ if (code)
+ return;
+
+ /* Safety check ... shouldn't get here unless shmem is set up. */
+ if (!pgss || !pgss_hash)
+ return;
+
+ /* Don't dump if told not to. */
+ if (!pgss_save)
+ return;
+
+ file = AllocateFile(PGSS_DUMP_FILE ".tmp", PG_BINARY_W);
+ if (file == NULL)
+ goto error;
+
+ if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
+ goto error;
+ if (fwrite(&PGSS_PG_MAJOR_VERSION, sizeof(uint32), 1, file) != 1)
+ goto error;
+ num_entries = hash_get_num_entries(pgss_hash);
+ if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
+ goto error;
+
+ qbuffer = qtext_load_file(&qbuffer_size);
+ if (qbuffer == NULL)
+ goto error;
+
+ /*
+ * When serializing to disk, we store query texts immediately after their
+ * entry data. Any orphaned query texts are thereby excluded.
+ */
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ int len = entry->query_len;
+ char *qstr = qtext_fetch(entry->query_offset, len,
+ qbuffer, qbuffer_size);
+
+ if (qstr == NULL)
+ continue; /* Ignore any entries with bogus texts */
+
+ if (fwrite(entry, sizeof(pgssEntry), 1, file) != 1 ||
+ fwrite(qstr, 1, len + 1, file) != len + 1)
+ {
+ /* note: we assume hash_seq_term won't change errno */
+ hash_seq_term(&hash_seq);
+ goto error;
+ }
+ }
+
+ free(qbuffer);
+ qbuffer = NULL;
+
+ if (FreeFile(file))
+ {
+ file = NULL;
+ goto error;
+ }
+
+ /*
+ * Rename file into place, so we atomically replace any old one.
+ */
+ (void) durable_rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE, LOG);
+
+	/* Unlink query-texts file; it's not needed while the server is down */
+ unlink(PGSS_TEXT_FILE);
+
+ return;
+
+error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write file \"%s\": %m",
+ PGSS_DUMP_FILE ".tmp")));
+ if (qbuffer)
+ free(qbuffer);
+ if (file)
+ FreeFile(file);
+ unlink(PGSS_DUMP_FILE ".tmp");
+ unlink(PGSS_TEXT_FILE);
+}
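+
+/*
+ * For clarity (an informal sketch, not a normative spec): the dump file
+ * written above is laid out as
+ *
+ *	uint32	PGSS_FILE_HEADER
+ *	uint32	PGSS_PG_MAJOR_VERSION
+ *	int32	number of entries
+ *	then, per entry: a pgssEntry struct image, immediately followed by
+ *	its query text including the trailing '\0'
+ *
+ * which is exactly the sequence pgss_shmem_startup() reads back.
+ */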
+
+/*
+ * Post-parse-analysis hook: mark query with a queryId
+ */
+static void
+pgss_post_parse_analyze(ParseState *pstate, Query *query)
+{
+ pgssJumbleState jstate;
+
+ if (prev_post_parse_analyze_hook)
+ prev_post_parse_analyze_hook(pstate, query);
+
+ /* Assert we didn't do this already */
+ Assert(query->queryId == UINT64CONST(0));
+
+ /* Safety check... */
+ if (!pgss || !pgss_hash || !pgss_enabled(exec_nested_level))
+ return;
+
+ /*
+ * Utility statements get queryId zero. We do this even in cases where
+ * the statement contains an optimizable statement for which a queryId
+ * could be derived (such as EXPLAIN or DECLARE CURSOR). For such cases,
+ * runtime control will first go through ProcessUtility and then the
+ * executor, and we don't want the executor hooks to do anything, since we
+ * are already measuring the statement's costs at the utility level.
+ */
+ if (query->utilityStmt)
+ {
+ query->queryId = UINT64CONST(0);
+ return;
+ }
+
+ /* Set up workspace for query jumbling */
+ jstate.jumble = (unsigned char *) palloc(JUMBLE_SIZE);
+ jstate.jumble_len = 0;
+ jstate.clocations_buf_size = 32;
+ jstate.clocations = (pgssLocationLen *)
+ palloc(jstate.clocations_buf_size * sizeof(pgssLocationLen));
+ jstate.clocations_count = 0;
+ jstate.highest_extern_param_id = 0;
+
+ /* Compute query ID and mark the Query node with it */
+ JumbleQuery(&jstate, query);
+ query->queryId =
+ DatumGetUInt64(hash_any_extended(jstate.jumble, jstate.jumble_len, 0));
+
+ /*
+ * If we are unlucky enough to get a hash of zero, use 1 instead, to
+ * prevent confusion with the utility-statement case.
+ */
+ if (query->queryId == UINT64CONST(0))
+ query->queryId = UINT64CONST(1);
+
+ /*
+ * If we were able to identify any ignorable constants, we immediately
+ * create a hash table entry for the query, so that we can record the
+ * normalized form of the query string. If there were no such constants,
+ * the normalized string would be the same as the query text anyway, so
+ * there's no need for an early entry.
+ */
+ if (jstate.clocations_count > 0)
+ pgss_store(pstate->p_sourcetext,
+ query->queryId,
+ query->stmt_location,
+ query->stmt_len,
+ PGSS_INVALID,
+ 0,
+ 0,
+ NULL,
+ NULL,
+ &jstate);
+}
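+
+/*
+ * To illustrate the effect of the jumbling above (informal example):
+ * "SELECT * FROM t WHERE x = 1" and "SELECT * FROM t WHERE x = 2" jumble
+ * to the same queryId, because a T_Const node contributes only its type,
+ * not its value; the constants' parse locations are recorded so that the
+ * stored text can later be normalized to "SELECT * FROM t WHERE x = $1".
+ */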
+
+/*
+ * Planner hook: forward to regular planner, but measure planning time
+ * if needed.
+ */
+static PlannedStmt *
+pgss_planner(Query *parse,
+ const char *query_string,
+ int cursorOptions,
+ ParamListInfo boundParams)
+{
+ PlannedStmt *result;
+
+ /*
+	 * We can't process the query if no query_string is provided, as
+	 * pgss_store needs it.  We also ignore queries without a queryId, as
+	 * they would otherwise be treated as utility statements, which may not
+	 * be the case.
+	 *
+	 * Note that planner_hook can be called from the planner itself, so we
+	 * have a specific nesting level for the planner.  However, utility
+	 * commands containing optimizable statements can also call the planner,
+	 * and so can regular DML (for instance, for underlying foreign-key
+	 * queries).  So testing the planner nesting level alone is not enough
+	 * to detect a real top-level planner call.
+ */
+ if (pgss_enabled(plan_nested_level + exec_nested_level)
+ && pgss_track_planning && query_string
+ && parse->queryId != UINT64CONST(0))
+ {
+ instr_time start;
+ instr_time duration;
+ BufferUsage bufusage_start,
+ bufusage;
+ WalUsage walusage_start,
+ walusage;
+
+		/* We need to track buffer usage, as the planner can access buffers. */
+ bufusage_start = pgBufferUsage;
+
+ /*
+		 * Similarly, the planner could write some WAL records in some cases
+		 * (e.g., setting a hint bit when hint bits are WAL-logged)
+ */
+ walusage_start = pgWalUsage;
+ INSTR_TIME_SET_CURRENT(start);
+
+ plan_nested_level++;
+ PG_TRY();
+ {
+ if (prev_planner_hook)
+ result = prev_planner_hook(parse, query_string, cursorOptions,
+ boundParams);
+ else
+ result = standard_planner(parse, query_string, cursorOptions,
+ boundParams);
+ }
+ PG_FINALLY();
+ {
+ plan_nested_level--;
+ }
+ PG_END_TRY();
+
+ INSTR_TIME_SET_CURRENT(duration);
+ INSTR_TIME_SUBTRACT(duration, start);
+
+ /* calc differences of buffer counters. */
+ memset(&bufusage, 0, sizeof(BufferUsage));
+ BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
+
+ /* calc differences of WAL counters. */
+ memset(&walusage, 0, sizeof(WalUsage));
+ WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);
+
+ pgss_store(query_string,
+ parse->queryId,
+ parse->stmt_location,
+ parse->stmt_len,
+ PGSS_PLAN,
+ INSTR_TIME_GET_MILLISEC(duration),
+ 0,
+ &bufusage,
+ &walusage,
+ NULL);
+ }
+ else
+ {
+ if (prev_planner_hook)
+ result = prev_planner_hook(parse, query_string, cursorOptions,
+ boundParams);
+ else
+ result = standard_planner(parse, query_string, cursorOptions,
+ boundParams);
+ }
+
+ return result;
+}
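+
+/*
+ * Informal note: the planning-time branch above is gated by the
+ * pg_stat_statements.track_planning GUC (off by default), so something
+ * like "SET pg_stat_statements.track_planning = on" must be issued, or
+ * the setting placed in postgresql.conf, before the plans and
+ * total_plan_time counters start accumulating.
+ */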
+
+/*
+ * ExecutorStart hook: start up tracking if needed
+ */
+static void
+pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
+{
+ if (prev_ExecutorStart)
+ prev_ExecutorStart(queryDesc, eflags);
+ else
+ standard_ExecutorStart(queryDesc, eflags);
+
+ /*
+ * If query has queryId zero, don't track it. This prevents double
+ * counting of optimizable statements that are directly contained in
+ * utility statements.
+ */
+ if (pgss_enabled(exec_nested_level) && queryDesc->plannedstmt->queryId != UINT64CONST(0))
+ {
+ /*
+ * Set up to track total elapsed time in ExecutorRun. Make sure the
+ * space is allocated in the per-query context so it will go away at
+ * ExecutorEnd.
+ */
+ if (queryDesc->totaltime == NULL)
+ {
+ MemoryContext oldcxt;
+
+ oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);
+ queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL);
+ MemoryContextSwitchTo(oldcxt);
+ }
+ }
+}
+
+/*
+ * ExecutorRun hook: all we need do is track nesting depth
+ */
+static void
+pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, uint64 count,
+ bool execute_once)
+{
+ exec_nested_level++;
+ PG_TRY();
+ {
+ if (prev_ExecutorRun)
+ prev_ExecutorRun(queryDesc, direction, count, execute_once);
+ else
+ standard_ExecutorRun(queryDesc, direction, count, execute_once);
+ }
+ PG_FINALLY();
+ {
+ exec_nested_level--;
+ }
+ PG_END_TRY();
+}
+
+/*
+ * ExecutorFinish hook: all we need do is track nesting depth
+ */
+static void
+pgss_ExecutorFinish(QueryDesc *queryDesc)
+{
+ exec_nested_level++;
+ PG_TRY();
+ {
+ if (prev_ExecutorFinish)
+ prev_ExecutorFinish(queryDesc);
+ else
+ standard_ExecutorFinish(queryDesc);
+ }
+ PG_FINALLY();
+ {
+ exec_nested_level--;
+ }
+ PG_END_TRY();
+}
+
+/*
+ * ExecutorEnd hook: store results if needed
+ */
+static void
+pgss_ExecutorEnd(QueryDesc *queryDesc)
+{
+ uint64 queryId = queryDesc->plannedstmt->queryId;
+
+ if (queryId != UINT64CONST(0) && queryDesc->totaltime &&
+ pgss_enabled(exec_nested_level))
+ {
+ /*
+ * Make sure stats accumulation is done. (Note: it's okay if several
+ * levels of hook all do this.)
+ */
+ InstrEndLoop(queryDesc->totaltime);
+
+ pgss_store(queryDesc->sourceText,
+ queryId,
+ queryDesc->plannedstmt->stmt_location,
+ queryDesc->plannedstmt->stmt_len,
+ PGSS_EXEC,
+ queryDesc->totaltime->total * 1000.0, /* convert to msec */
+ queryDesc->estate->es_processed,
+ &queryDesc->totaltime->bufusage,
+ &queryDesc->totaltime->walusage,
+ NULL);
+ }
+
+ if (prev_ExecutorEnd)
+ prev_ExecutorEnd(queryDesc);
+ else
+ standard_ExecutorEnd(queryDesc);
+}
+
+/*
+ * ProcessUtility hook
+ */
+static void
+pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
+ ProcessUtilityContext context,
+ ParamListInfo params, QueryEnvironment *queryEnv,
+ DestReceiver *dest, QueryCompletion *qc)
+{
+ Node *parsetree = pstmt->utilityStmt;
+
+ /*
+ * If it's an EXECUTE statement, we don't track it and don't increment the
+ * nesting level. This allows the cycles to be charged to the underlying
+ * PREPARE instead (by the Executor hooks), which is much more useful.
+ *
+ * We also don't track execution of PREPARE. If we did, we would get one
+ * hash table entry for the PREPARE (with hash calculated from the query
+ * string), and then a different one with the same query string (but hash
+ * calculated from the query tree) would be used to accumulate costs of
+ * ensuing EXECUTEs. This would be confusing, and inconsistent with other
+ * cases where planning time is not included at all.
+ *
+ * Likewise, we don't track execution of DEALLOCATE.
+ */
+ if (pgss_track_utility && pgss_enabled(exec_nested_level) &&
+ !IsA(parsetree, ExecuteStmt) &&
+ !IsA(parsetree, PrepareStmt) &&
+ !IsA(parsetree, DeallocateStmt))
+ {
+ instr_time start;
+ instr_time duration;
+ uint64 rows;
+ BufferUsage bufusage_start,
+ bufusage;
+ WalUsage walusage_start,
+ walusage;
+
+ bufusage_start = pgBufferUsage;
+ walusage_start = pgWalUsage;
+ INSTR_TIME_SET_CURRENT(start);
+
+ exec_nested_level++;
+ PG_TRY();
+ {
+ if (prev_ProcessUtility)
+ prev_ProcessUtility(pstmt, queryString,
+ context, params, queryEnv,
+ dest, qc);
+ else
+ standard_ProcessUtility(pstmt, queryString,
+ context, params, queryEnv,
+ dest, qc);
+ }
+ PG_FINALLY();
+ {
+ exec_nested_level--;
+ }
+ PG_END_TRY();
+
+ INSTR_TIME_SET_CURRENT(duration);
+ INSTR_TIME_SUBTRACT(duration, start);
+
+ rows = (qc && qc->commandTag == CMDTAG_COPY) ? qc->nprocessed : 0;
+
+ /* calc differences of buffer counters. */
+ memset(&bufusage, 0, sizeof(BufferUsage));
+ BufferUsageAccumDiff(&bufusage, &pgBufferUsage, &bufusage_start);
+
+ /* calc differences of WAL counters. */
+ memset(&walusage, 0, sizeof(WalUsage));
+ WalUsageAccumDiff(&walusage, &pgWalUsage, &walusage_start);
+
+ pgss_store(queryString,
+ 0, /* signal that it's a utility stmt */
+ pstmt->stmt_location,
+ pstmt->stmt_len,
+ PGSS_EXEC,
+ INSTR_TIME_GET_MILLISEC(duration),
+ rows,
+ &bufusage,
+ &walusage,
+ NULL);
+ }
+ else
+ {
+ if (prev_ProcessUtility)
+ prev_ProcessUtility(pstmt, queryString,
+ context, params, queryEnv,
+ dest, qc);
+ else
+ standard_ProcessUtility(pstmt, queryString,
+ context, params, queryEnv,
+ dest, qc);
+ }
+}
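+
+/*
+ * Worked example of the exclusions above (illustrative only): for
+ *
+ *	PREPARE p AS SELECT $1::int;
+ *	EXECUTE p(1);
+ *	EXECUTE p(2);
+ *
+ * neither the PREPARE nor the EXECUTEs are tracked here; instead, both
+ * executions are charged by the executor hooks to the single entry for
+ * the underlying "SELECT $1::int", which ends up with calls = 2.
+ */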
+
+/*
+ * Given an arbitrarily long query string, produce a hash for the purposes of
+ * identifying the query, without normalizing constants. Used when hashing
+ * utility statements.
+ */
+static uint64
+pgss_hash_string(const char *str, int len)
+{
+ return DatumGetUInt64(hash_any_extended((const unsigned char *) str,
+ len, 0));
+}
+
+/*
+ * Store some statistics for a statement.
+ *
+ * If queryId is 0 then this is a utility statement and we should compute
+ * a suitable queryId internally.
+ *
+ * If jstate is not NULL then we're trying to create an entry for which
+ * we have no statistics as yet; we just want to record the normalized
+ * query string. total_time, rows, bufusage and walusage are ignored in this
+ * case.
+ *
+ * If kind is PGSS_PLAN or PGSS_EXEC, its value is used as the array position
+ * for the arrays in the Counters field.
+ */
+static void
+pgss_store(const char *query, uint64 queryId,
+ int query_location, int query_len,
+ pgssStoreKind kind,
+ double total_time, uint64 rows,
+ const BufferUsage *bufusage,
+ const WalUsage *walusage,
+ pgssJumbleState *jstate)
+{
+ pgssHashKey key;
+ pgssEntry *entry;
+ char *norm_query = NULL;
+ int encoding = GetDatabaseEncoding();
+
+ Assert(query != NULL);
+
+ /* Safety check... */
+ if (!pgss || !pgss_hash)
+ return;
+
+ /*
+ * Confine our attention to the relevant part of the string, if the query
+ * is a portion of a multi-statement source string.
+ *
+ * First apply starting offset, unless it's -1 (unknown).
+ */
+ if (query_location >= 0)
+ {
+ Assert(query_location <= strlen(query));
+ query += query_location;
+ /* Length of 0 (or -1) means "rest of string" */
+ if (query_len <= 0)
+ query_len = strlen(query);
+ else
+ Assert(query_len <= strlen(query));
+ }
+ else
+ {
+ /* If query location is unknown, distrust query_len as well */
+ query_location = 0;
+ query_len = strlen(query);
+ }
+
+ /*
+ * Discard leading and trailing whitespace, too. Use scanner_isspace()
+ * not libc's isspace(), because we want to match the lexer's behavior.
+ */
+ while (query_len > 0 && scanner_isspace(query[0]))
+ query++, query_location++, query_len--;
+ while (query_len > 0 && scanner_isspace(query[query_len - 1]))
+ query_len--;
+
+ /*
+ * For utility statements, we just hash the query string to get an ID.
+ */
+ if (queryId == UINT64CONST(0))
+ {
+ queryId = pgss_hash_string(query, query_len);
+
+ /*
+		 * If we are unlucky enough to get a hash of zero (invalid), use 2 as
+		 * the queryId instead; queryId 1 is already in use for normal
+		 * statements that hash to zero.
+ */
+ if (queryId == UINT64CONST(0))
+ queryId = UINT64CONST(2);
+ }
+
+ /* Set up key for hashtable search */
+ key.userid = GetUserId();
+ key.dbid = MyDatabaseId;
+ key.queryid = queryId;
+
+ /* Lookup the hash table entry with shared lock. */
+ LWLockAcquire(pgss->lock, LW_SHARED);
+
+ entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
+
+ /* Create new entry, if not present */
+ if (!entry)
+ {
+ Size query_offset;
+ int gc_count;
+ bool stored;
+ bool do_gc;
+
+ /*
+ * Create a new, normalized query string if caller asked. We don't
+ * need to hold the lock while doing this work. (Note: in any case,
+ * it's possible that someone else creates a duplicate hashtable entry
+ * in the interval where we don't hold the lock below. That case is
+ * handled by entry_alloc.)
+ */
+ if (jstate)
+ {
+ LWLockRelease(pgss->lock);
+ norm_query = generate_normalized_query(jstate, query,
+ query_location,
+ &query_len,
+ encoding);
+ LWLockAcquire(pgss->lock, LW_SHARED);
+ }
+
+ /* Append new query text to file with only shared lock held */
+ stored = qtext_store(norm_query ? norm_query : query, query_len,
+ &query_offset, &gc_count);
+
+ /*
+ * Determine whether we need to garbage collect external query texts
+ * while the shared lock is still held. This micro-optimization
+ * avoids taking the time to decide this while holding exclusive lock.
+ */
+ do_gc = need_gc_qtexts();
+
+ /* Need exclusive lock to make a new hashtable entry - promote */
+ LWLockRelease(pgss->lock);
+ LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
+
+ /*
+ * A garbage collection may have occurred while we weren't holding the
+ * lock. In the unlikely event that this happens, the query text we
+ * stored above will have been garbage collected, so write it again.
+ * This should be infrequent enough that doing it while holding
+ * exclusive lock isn't a performance problem.
+ */
+ if (!stored || pgss->gc_count != gc_count)
+ stored = qtext_store(norm_query ? norm_query : query, query_len,
+ &query_offset, NULL);
+
+ /* If we failed to write to the text file, give up */
+ if (!stored)
+ goto done;
+
+ /* OK to create a new hashtable entry */
+ entry = entry_alloc(&key, query_offset, query_len, encoding,
+ jstate != NULL);
+
+ /* If needed, perform garbage collection while exclusive lock held */
+ if (do_gc)
+ gc_qtexts();
+ }
+
+ /* Increment the counts, except when jstate is not NULL */
+ if (!jstate)
+ {
+ /*
+ * Grab the spinlock while updating the counters (see comment about
+ * locking rules at the head of the file)
+ */
+ volatile pgssEntry *e = (volatile pgssEntry *) entry;
+
+ Assert(kind == PGSS_PLAN || kind == PGSS_EXEC);
+
+ SpinLockAcquire(&e->mutex);
+
+ /* "Unstick" entry if it was previously sticky */
+ if (IS_STICKY(e->counters))
+ e->counters.usage = USAGE_INIT;
+
+ e->counters.calls[kind] += 1;
+ e->counters.total_time[kind] += total_time;
+
+ if (e->counters.calls[kind] == 1)
+ {
+ e->counters.min_time[kind] = total_time;
+ e->counters.max_time[kind] = total_time;
+ e->counters.mean_time[kind] = total_time;
+ }
+ else
+ {
+ /*
+ * Welford's method for accurately computing variance. See
+ * <http://www.johndcook.com/blog/standard_deviation/>
+ */
+ double old_mean = e->counters.mean_time[kind];
+
+ e->counters.mean_time[kind] +=
+ (total_time - old_mean) / e->counters.calls[kind];
+ e->counters.sum_var_time[kind] +=
+ (total_time - old_mean) * (total_time - e->counters.mean_time[kind]);
+
+ /* calculate min and max time */
+ if (e->counters.min_time[kind] > total_time)
+ e->counters.min_time[kind] = total_time;
+ if (e->counters.max_time[kind] < total_time)
+ e->counters.max_time[kind] = total_time;
+ }
+ e->counters.rows += rows;
+ e->counters.shared_blks_hit += bufusage->shared_blks_hit;
+ e->counters.shared_blks_read += bufusage->shared_blks_read;
+ e->counters.shared_blks_dirtied += bufusage->shared_blks_dirtied;
+ e->counters.shared_blks_written += bufusage->shared_blks_written;
+ e->counters.local_blks_hit += bufusage->local_blks_hit;
+ e->counters.local_blks_read += bufusage->local_blks_read;
+ e->counters.local_blks_dirtied += bufusage->local_blks_dirtied;
+ e->counters.local_blks_written += bufusage->local_blks_written;
+ e->counters.temp_blks_read += bufusage->temp_blks_read;
+ e->counters.temp_blks_written += bufusage->temp_blks_written;
+ e->counters.blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->blk_read_time);
+ e->counters.blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->blk_write_time);
+ e->counters.usage += USAGE_EXEC(total_time);
+ e->counters.wal_records += walusage->wal_records;
+ e->counters.wal_fpi += walusage->wal_fpi;
+ e->counters.wal_bytes += walusage->wal_bytes;
+
+ SpinLockRelease(&e->mutex);
+ }
+
+done:
+ LWLockRelease(pgss->lock);
+
+ /* We postpone this clean-up until we're out of the lock */
+ if (norm_query)
+ pfree(norm_query);
+}
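+
+/*
+ * Worked example of the Welford update above (illustrative arithmetic):
+ * for execution times of 10, 20 and 30 ms, the running state evolves as
+ *
+ *	call 1: mean = 10.0, sum_var = 0.0
+ *	call 2: mean = 10 + (20 - 10)/2 = 15.0
+ *	        sum_var = 0 + (20 - 10) * (20 - 15) = 50.0
+ *	call 3: mean = 15 + (30 - 15)/3 = 20.0
+ *	        sum_var = 50 + (30 - 15) * (30 - 20) = 200.0
+ *
+ * so the population variance is 200/3 ~= 66.7, matching the direct
+ * computation ((10-20)^2 + (20-20)^2 + (30-20)^2) / 3.
+ */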
+
+/*
+ * Reset statement statistics corresponding to userid, dbid, and queryid.
+ */
+Datum
+pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
+{
+ Oid userid;
+ Oid dbid;
+ uint64 queryid;
+
+ userid = PG_GETARG_OID(0);
+ dbid = PG_GETARG_OID(1);
+ queryid = (uint64) PG_GETARG_INT64(2);
+
+ entry_reset(userid, dbid, queryid);
+
+ PG_RETURN_VOID();
+}
+
+/*
+ * Reset statement statistics.
+ */
+Datum
+pg_stat_statements_reset(PG_FUNCTION_ARGS)
+{
+ entry_reset(0, 0, 0);
+
+ PG_RETURN_VOID();
+}
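+
+/*
+ * Informal usage sketch: from SQL, "SELECT pg_stat_statements_reset()"
+ * discards all entries, while the 1.7 variant can be scoped; e.g., with
+ * hypothetical OIDs, pg_stat_statements_reset(10, 16384, 0) removes only
+ * the entries of user OID 10 in database OID 16384.  A zero argument
+ * means "match any" for that key column (see entry_reset below).
+ */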
+
+/* Number of output arguments (columns) for various API versions */
+#define PG_STAT_STATEMENTS_COLS_V1_0 14
+#define PG_STAT_STATEMENTS_COLS_V1_1 18
+#define PG_STAT_STATEMENTS_COLS_V1_2 19
+#define PG_STAT_STATEMENTS_COLS_V1_3 23
+#define PG_STAT_STATEMENTS_COLS_V1_8 32
+#define PG_STAT_STATEMENTS_COLS 32 /* maximum of above */
+
+/*
+ * Retrieve statement statistics.
+ *
+ * The SQL API of this function has changed multiple times, and will likely
+ * do so again in future. To support the case where a newer version of this
+ * loadable module is being used with an old SQL declaration of the function,
+ * we continue to support the older API versions. For 1.2 and later, the
+ * expected API version is identified by embedding it in the C name of the
+ * function. Unfortunately we weren't bright enough to do that for 1.1.
+ */
+Datum
+pg_stat_statements_1_8(PG_FUNCTION_ARGS)
+{
+ bool showtext = PG_GETARG_BOOL(0);
+
+ pg_stat_statements_internal(fcinfo, PGSS_V1_8, showtext);
+
+ return (Datum) 0;
+}
+
+Datum
+pg_stat_statements_1_3(PG_FUNCTION_ARGS)
+{
+ bool showtext = PG_GETARG_BOOL(0);
+
+ pg_stat_statements_internal(fcinfo, PGSS_V1_3, showtext);
+
+ return (Datum) 0;
+}
+
+Datum
+pg_stat_statements_1_2(PG_FUNCTION_ARGS)
+{
+ bool showtext = PG_GETARG_BOOL(0);
+
+ pg_stat_statements_internal(fcinfo, PGSS_V1_2, showtext);
+
+ return (Datum) 0;
+}
+
+/*
+ * Legacy entry point for pg_stat_statements() API versions 1.0 and 1.1.
+ * This can be removed someday, perhaps.
+ */
+Datum
+pg_stat_statements(PG_FUNCTION_ARGS)
+{
+ /* If it's really API 1.1, we'll figure that out below */
+ pg_stat_statements_internal(fcinfo, PGSS_V1_0, true);
+
+ return (Datum) 0;
+}
+
+/* Common code for all versions of pg_stat_statements() */
+static void
+pg_stat_statements_internal(FunctionCallInfo fcinfo,
+ pgssVersion api_version,
+ bool showtext)
+{
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ Oid userid = GetUserId();
+ bool is_allowed_role = false;
+ char *qbuffer = NULL;
+ Size qbuffer_size = 0;
+ Size extent = 0;
+ int gc_count = 0;
+ HASH_SEQ_STATUS hash_seq;
+ pgssEntry *entry;
+
+	/* Superusers or members of pg_read_all_stats are allowed */
+ is_allowed_role = is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS);
+
+ /* hash table must exist already */
+ if (!pgss || !pgss_hash)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not allowed in this context")));
+
+ /* Switch into long-lived context to construct returned data structures */
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ /* Build a tuple descriptor for our result type */
+ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
+ elog(ERROR, "return type must be a row type");
+
+ /*
+ * Check we have the expected number of output arguments. Aside from
+ * being a good safety check, we need a kluge here to detect API version
+ * 1.1, which was wedged into the code in an ill-considered way.
+ */
+ switch (tupdesc->natts)
+ {
+ case PG_STAT_STATEMENTS_COLS_V1_0:
+ if (api_version != PGSS_V1_0)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ case PG_STAT_STATEMENTS_COLS_V1_1:
+ /* pg_stat_statements() should have told us 1.0 */
+ if (api_version != PGSS_V1_0)
+ elog(ERROR, "incorrect number of output arguments");
+ api_version = PGSS_V1_1;
+ break;
+ case PG_STAT_STATEMENTS_COLS_V1_2:
+ if (api_version != PGSS_V1_2)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ case PG_STAT_STATEMENTS_COLS_V1_3:
+ if (api_version != PGSS_V1_3)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ case PG_STAT_STATEMENTS_COLS_V1_8:
+ if (api_version != PGSS_V1_8)
+ elog(ERROR, "incorrect number of output arguments");
+ break;
+ default:
+ elog(ERROR, "incorrect number of output arguments");
+ }
+
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ MemoryContextSwitchTo(oldcontext);
+
+ /*
+ * We'd like to load the query text file (if needed) while not holding any
+ * lock on pgss->lock. In the worst case we'll have to do this again
+ * after we have the lock, but it's unlikely enough to make this a win
+ * despite occasional duplicated work. We need to reload if anybody
+ * writes to the file (either a retail qtext_store(), or a garbage
+ * collection) between this point and where we've gotten shared lock. If
+ * a qtext_store is actually in progress when we look, we might as well
+ * skip the speculative load entirely.
+ */
+ if (showtext)
+ {
+ int n_writers;
+
+ /* Take the mutex so we can examine variables */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ extent = s->extent;
+ n_writers = s->n_writers;
+ gc_count = s->gc_count;
+ SpinLockRelease(&s->mutex);
+ }
+
+ /* No point in loading file now if there are active writers */
+ if (n_writers == 0)
+ qbuffer = qtext_load_file(&qbuffer_size);
+ }
+
+ /*
+ * Get shared lock, load or reload the query text file if we must, and
+ * iterate over the hashtable entries.
+ *
+ * With a large hash table, we might be holding the lock rather longer
+ * than one could wish. However, this only blocks creation of new hash
+ * table entries, and the larger the hash table the less likely that is to
+ * be needed. So we can hope this is okay. Perhaps someday we'll decide
+ * we need to partition the hash table to limit the time spent holding any
+ * one lock.
+ */
+ LWLockAcquire(pgss->lock, LW_SHARED);
+
+ if (showtext)
+ {
+ /*
+ * Here it is safe to examine extent and gc_count without taking the
+ * mutex. Note that although other processes might change
+ * pgss->extent just after we look at it, the strings they then write
+ * into the file cannot yet be referenced in the hashtable, so we
+ * don't care whether we see them or not.
+ *
+ * If qtext_load_file fails, we just press on; we'll return NULL for
+ * every query text.
+ */
+ if (qbuffer == NULL ||
+ pgss->extent != extent ||
+ pgss->gc_count != gc_count)
+ {
+ if (qbuffer)
+ free(qbuffer);
+ qbuffer = qtext_load_file(&qbuffer_size);
+ }
+ }
+
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ Datum values[PG_STAT_STATEMENTS_COLS];
+ bool nulls[PG_STAT_STATEMENTS_COLS];
+ int i = 0;
+ Counters tmp;
+ double stddev;
+ int64 queryid = entry->key.queryid;
+
+ memset(values, 0, sizeof(values));
+ memset(nulls, 0, sizeof(nulls));
+
+ values[i++] = ObjectIdGetDatum(entry->key.userid);
+ values[i++] = ObjectIdGetDatum(entry->key.dbid);
+
+ if (is_allowed_role || entry->key.userid == userid)
+ {
+ if (api_version >= PGSS_V1_2)
+ values[i++] = Int64GetDatumFast(queryid);
+
+ if (showtext)
+ {
+ char *qstr = qtext_fetch(entry->query_offset,
+ entry->query_len,
+ qbuffer,
+ qbuffer_size);
+
+ if (qstr)
+ {
+ char *enc;
+
+ enc = pg_any_to_server(qstr,
+ entry->query_len,
+ entry->encoding);
+
+ values[i++] = CStringGetTextDatum(enc);
+
+ if (enc != qstr)
+ pfree(enc);
+ }
+ else
+ {
+ /* Just return a null if we fail to find the text */
+ nulls[i++] = true;
+ }
+ }
+ else
+ {
+ /* Query text not requested */
+ nulls[i++] = true;
+ }
+ }
+ else
+ {
+ /* Don't show queryid */
+ if (api_version >= PGSS_V1_2)
+ nulls[i++] = true;
+
+ /*
+ * Don't show query text, but hint as to the reason for not doing
+ * so if it was requested
+ */
+ if (showtext)
+ values[i++] = CStringGetTextDatum("<insufficient privilege>");
+ else
+ nulls[i++] = true;
+ }
+
+ /* copy counters to a local variable to keep locking time short */
+ {
+ volatile pgssEntry *e = (volatile pgssEntry *) entry;
+
+ SpinLockAcquire(&e->mutex);
+ tmp = e->counters;
+ SpinLockRelease(&e->mutex);
+ }
+
+ /* Skip entry if unexecuted (ie, it's a pending "sticky" entry) */
+ if (IS_STICKY(tmp))
+ continue;
+
+ /* Note that we rely on PGSS_PLAN being 0 and PGSS_EXEC being 1. */
+ for (int kind = 0; kind < PGSS_NUMKIND; kind++)
+ {
+ if (kind == PGSS_EXEC || api_version >= PGSS_V1_8)
+ {
+ values[i++] = Int64GetDatumFast(tmp.calls[kind]);
+ values[i++] = Float8GetDatumFast(tmp.total_time[kind]);
+ }
+
+ if ((kind == PGSS_EXEC && api_version >= PGSS_V1_3) ||
+ api_version >= PGSS_V1_8)
+ {
+ values[i++] = Float8GetDatumFast(tmp.min_time[kind]);
+ values[i++] = Float8GetDatumFast(tmp.max_time[kind]);
+ values[i++] = Float8GetDatumFast(tmp.mean_time[kind]);
+
+ /*
+ * Note we are calculating the population variance here, not
+ * the sample variance, as we have data for the whole
+ * population, so Bessel's correction is not used, and we
+ * don't divide by tmp.calls - 1.
+ */
+ if (tmp.calls[kind] > 1)
+ stddev = sqrt(tmp.sum_var_time[kind] / tmp.calls[kind]);
+ else
+ stddev = 0.0;
+ values[i++] = Float8GetDatumFast(stddev);
+ }
+ }
+ values[i++] = Int64GetDatumFast(tmp.rows);
+ values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
+ values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
+ if (api_version >= PGSS_V1_1)
+ values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
+ values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
+ values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
+ values[i++] = Int64GetDatumFast(tmp.local_blks_read);
+ if (api_version >= PGSS_V1_1)
+ values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
+ values[i++] = Int64GetDatumFast(tmp.local_blks_written);
+ values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
+ values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
+ if (api_version >= PGSS_V1_1)
+ {
+ values[i++] = Float8GetDatumFast(tmp.blk_read_time);
+ values[i++] = Float8GetDatumFast(tmp.blk_write_time);
+ }
+ if (api_version >= PGSS_V1_8)
+ {
+ char buf[256];
+ Datum wal_bytes;
+
+ values[i++] = Int64GetDatumFast(tmp.wal_records);
+ values[i++] = Int64GetDatumFast(tmp.wal_fpi);
+
+ snprintf(buf, sizeof buf, UINT64_FORMAT, tmp.wal_bytes);
+
+ /* Convert to numeric. */
+ wal_bytes = DirectFunctionCall3(numeric_in,
+ CStringGetDatum(buf),
+ ObjectIdGetDatum(0),
+ Int32GetDatum(-1));
+ values[i++] = wal_bytes;
+ }
+
+ Assert(i == (api_version == PGSS_V1_0 ? PG_STAT_STATEMENTS_COLS_V1_0 :
+ api_version == PGSS_V1_1 ? PG_STAT_STATEMENTS_COLS_V1_1 :
+ api_version == PGSS_V1_2 ? PG_STAT_STATEMENTS_COLS_V1_2 :
+ api_version == PGSS_V1_3 ? PG_STAT_STATEMENTS_COLS_V1_3 :
+ api_version == PGSS_V1_8 ? PG_STAT_STATEMENTS_COLS_V1_8 :
+ -1 /* fail if you forget to update this assert */ ));
+
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+ }
+
+ /* clean up and return the tuplestore */
+ LWLockRelease(pgss->lock);
+
+ if (qbuffer)
+ free(qbuffer);
+
+ tuplestore_donestoring(tupstore);
+}
+
+/*
+ * Estimate shared memory space needed.
+ */
+static Size
+pgss_memsize(void)
+{
+ Size size;
+
+ size = MAXALIGN(sizeof(pgssSharedState));
+ size = add_size(size, hash_estimate_size(pgss_max, sizeof(pgssEntry)));
+
+ return size;
+}
+
+/*
+ * Allocate a new hashtable entry.
+ * Caller must hold an exclusive lock on pgss->lock.
+ *
+ * "query" need not be null-terminated; we rely on query_len instead
+ *
+ * If "sticky" is true, make the new entry artificially sticky so that it will
+ * probably still be there when the query finishes execution. We do this by
+ * giving it a median usage value rather than the normal value. (Strictly
+ * speaking, query strings are normalized on a best effort basis, though it
+ * would be difficult to demonstrate this even under artificial conditions.)
+ *
+ * Note: despite needing exclusive lock, it's not an error for the target
+ * entry to already exist. This is because pgss_store releases and
+ * reacquires lock after failing to find a match; so someone else could
+ * have made the entry while we waited to get exclusive lock.
+ */
+static pgssEntry *
+entry_alloc(pgssHashKey *key, Size query_offset, int query_len, int encoding,
+ bool sticky)
+{
+ pgssEntry *entry;
+ bool found;
+
+ /* Make space if needed */
+ while (hash_get_num_entries(pgss_hash) >= pgss_max)
+ entry_dealloc();
+
+ /* Find or create an entry with desired hash code */
+ entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);
+
+ if (!found)
+ {
+ /* New entry, initialize it */
+
+ /* reset the statistics */
+ memset(&entry->counters, 0, sizeof(Counters));
+ /* set the appropriate initial usage count */
+ entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
+		/* re-initialize the mutex each time ... we assume no one is using it */
+ SpinLockInit(&entry->mutex);
+ /* ... and don't forget the query text metadata */
+ Assert(query_len >= 0);
+ entry->query_offset = query_offset;
+ entry->query_len = query_len;
+ entry->encoding = encoding;
+ }
+
+ return entry;
+}
+
+/*
+ * qsort comparator for sorting into increasing usage order
+ */
+static int
+entry_cmp(const void *lhs, const void *rhs)
+{
+ double l_usage = (*(pgssEntry *const *) lhs)->counters.usage;
+ double r_usage = (*(pgssEntry *const *) rhs)->counters.usage;
+
+ if (l_usage < r_usage)
+ return -1;
+ else if (l_usage > r_usage)
+ return +1;
+ else
+ return 0;
+}
+
+/*
+ * Deallocate least-used entries.
+ *
+ * Caller must hold an exclusive lock on pgss->lock.
+ */
+static void
+entry_dealloc(void)
+{
+ HASH_SEQ_STATUS hash_seq;
+ pgssEntry **entries;
+ pgssEntry *entry;
+ int nvictims;
+ int i;
+ Size tottextlen;
+ int nvalidtexts;
+
+ /*
+ * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
+ * While we're scanning the table, apply the decay factor to the usage
+ * values, and update the mean query length.
+ *
+ * Note that the mean query length is almost immediately obsolete, since
+	 * we compute it before, not after, discarding the least-used entries.
+ * Hopefully, that doesn't affect the mean too much; it doesn't seem worth
+ * making two passes to get a more current result. Likewise, the new
+ * cur_median_usage includes the entries we're about to zap.
+ */
+
+ entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *));
+
+ i = 0;
+ tottextlen = 0;
+ nvalidtexts = 0;
+
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ entries[i++] = entry;
+ /* "Sticky" entries get a different usage decay rate. */
+ if (IS_STICKY(entry->counters))
+ entry->counters.usage *= STICKY_DECREASE_FACTOR;
+ else
+ entry->counters.usage *= USAGE_DECREASE_FACTOR;
+ /* In the mean length computation, ignore dropped texts. */
+ if (entry->query_len >= 0)
+ {
+ tottextlen += entry->query_len + 1;
+ nvalidtexts++;
+ }
+ }
+
+ /* Sort into increasing order by usage */
+ qsort(entries, i, sizeof(pgssEntry *), entry_cmp);
+
+ /* Record the (approximate) median usage */
+ if (i > 0)
+ pgss->cur_median_usage = entries[i / 2]->counters.usage;
+ /* Record the mean query length */
+ if (nvalidtexts > 0)
+ pgss->mean_query_len = tottextlen / nvalidtexts;
+ else
+ pgss->mean_query_len = ASSUMED_LENGTH_INIT;
+
+ /* Now zap an appropriate fraction of lowest-usage entries */
+ nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
+ nvictims = Min(nvictims, i);
+
+ for (i = 0; i < nvictims; i++)
+ {
+ hash_search(pgss_hash, &entries[i]->key, HASH_REMOVE, NULL);
+ }
+
+ pfree(entries);
+}
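+
+/*
+ * Illustrative arithmetic for the eviction above, assuming the customary
+ * USAGE_DEALLOC_PERCENT of 5 defined earlier in this file: a full table
+ * of 1000 entries evicts Max(10, 1000 * 5 / 100) = 50 of its lowest-usage
+ * entries per call, and the Max(10, ...) floor keeps small tables from
+ * evicting just one entry at a time.
+ */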
+
+/*
+ * Given a query string (not necessarily null-terminated), allocate a new
+ * entry in the external query text file and store the string there.
+ *
+ * If successful, returns true, and stores the new entry's offset in the file
+ * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
+ * number of garbage collections that have occurred so far.
+ *
+ * On failure, returns false.
+ *
+ * At least a shared lock on pgss->lock must be held by the caller, so as
+ * to prevent a concurrent garbage collection. Share-lock-holding callers
+ * should pass a gc_count pointer to obtain the number of garbage collections,
+ * so that they can recheck the count after obtaining exclusive lock to
+ * detect whether a garbage collection occurred (and removed this entry).
+ */
+static bool
+qtext_store(const char *query, int query_len,
+ Size *query_offset, int *gc_count)
+{
+ Size off;
+ int fd;
+
+ /*
+ * We use a spinlock to protect extent/n_writers/gc_count, so that
+ * multiple processes may execute this function concurrently.
+ */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ off = s->extent;
+ s->extent += query_len + 1;
+ s->n_writers++;
+ if (gc_count)
+ *gc_count = s->gc_count;
+ SpinLockRelease(&s->mutex);
+ }
+
+ *query_offset = off;
+
+ /* Now write the data into the successfully-reserved part of the file */
+ fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDWR | O_CREAT | PG_BINARY);
+ if (fd < 0)
+ goto error;
+
+ if (pg_pwrite(fd, query, query_len, off) != query_len)
+ goto error;
+ if (pg_pwrite(fd, "\0", 1, off + query_len) != 1)
+ goto error;
+
+ CloseTransientFile(fd);
+
+ /* Mark our write complete */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ s->n_writers--;
+ SpinLockRelease(&s->mutex);
+ }
+
+ return true;
+
+error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+
+ if (fd >= 0)
+ CloseTransientFile(fd);
+
+ /* Mark our write complete */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ s->n_writers--;
+ SpinLockRelease(&s->mutex);
+ }
+
+ return false;
+}
+
+/*
+ * Read the external query text file into a malloc'd buffer.
+ *
+ * Returns NULL (without throwing an error) if unable to read, eg
+ * file not there or insufficient memory.
+ *
+ * On success, the buffer size is also returned into *buffer_size.
+ *
+ * This can be called without any lock on pgss->lock, but in that case
+ * the caller is responsible for verifying that the result is sane.
+ */
+static char *
+qtext_load_file(Size *buffer_size)
+{
+ char *buf;
+ int fd;
+ struct stat stat;
+
+ fd = OpenTransientFile(PGSS_TEXT_FILE, O_RDONLY | PG_BINARY);
+ if (fd < 0)
+ {
+ if (errno != ENOENT)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ return NULL;
+ }
+
+ /* Get file length */
+ if (fstat(fd, &stat))
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not stat file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ CloseTransientFile(fd);
+ return NULL;
+ }
+
+ /* Allocate buffer; beware that off_t might be wider than size_t */
+ if (stat.st_size <= MaxAllocHugeSize)
+ buf = (char *) malloc(stat.st_size);
+ else
+ buf = NULL;
+ if (buf == NULL)
+ {
+ ereport(LOG,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory"),
+ errdetail("Could not allocate enough memory to read file \"%s\".",
+ PGSS_TEXT_FILE)));
+ CloseTransientFile(fd);
+ return NULL;
+ }
+
+ /*
+ * OK, slurp in the file. If we get a short read and errno doesn't get
+ * set, the reason is probably that garbage collection truncated the file
+ * since we did the fstat(), so we don't log a complaint --- but we don't
+ * return the data, either, since it's most likely corrupt due to
+ * concurrent writes from garbage collection.
+ */
+ errno = 0;
+ if (read(fd, buf, stat.st_size) != stat.st_size)
+ {
+ if (errno)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not read file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ free(buf);
+ CloseTransientFile(fd);
+ return NULL;
+ }
+
+ if (CloseTransientFile(fd) != 0)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not close file \"%s\": %m", PGSS_TEXT_FILE)));
+
+ *buffer_size = stat.st_size;
+ return buf;
+}
+
+/*
+ * Locate a query text in the file image previously read by qtext_load_file().
+ *
+ * We validate the given offset/length, and return NULL if bogus. Otherwise,
+ * the result points to a null-terminated string within the buffer.
+ */
+static char *
+qtext_fetch(Size query_offset, int query_len,
+ char *buffer, Size buffer_size)
+{
+ /* File read failed? */
+ if (buffer == NULL)
+ return NULL;
+ /* Bogus offset/length? */
+ if (query_len < 0 ||
+ query_offset + query_len >= buffer_size)
+ return NULL;
+ /* As a further sanity check, make sure there's a trailing null */
+ if (buffer[query_offset + query_len] != '\0')
+ return NULL;
+ /* Looks OK */
+ return buffer + query_offset;
+}
+
+/*
+ * Do we need to garbage-collect the external query text file?
+ *
+ * Caller should hold at least a shared lock on pgss->lock.
+ */
+static bool
+need_gc_qtexts(void)
+{
+ Size extent;
+
+ /* Read shared extent pointer */
+ {
+ volatile pgssSharedState *s = (volatile pgssSharedState *) pgss;
+
+ SpinLockAcquire(&s->mutex);
+ extent = s->extent;
+ SpinLockRelease(&s->mutex);
+ }
+
+ /* Don't proceed if file does not exceed 512 bytes per possible entry */
+ if (extent < 512 * pgss_max)
+ return false;
+
+ /*
+ * Don't proceed if file is less than about 50% bloat. Nothing can or
+ * should be done in the event of unusually large query texts accounting
+ * for file's large size. We go to the trouble of maintaining the mean
+ * query length in order to prevent garbage collection from thrashing
+ * uselessly.
+ */
+ if (extent < pgss->mean_query_len * pgss_max * 2)
+ return false;
+
+ return true;
+}
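+
+/*
+ * Worked example of the thresholds above (illustrative, assuming a
+ * hypothetical mean query length of 1 kB): with pgss_max = 5000, the
+ * first test requires the file to exceed 512 * 5000 = ~2.5 MB, and the
+ * second requires it to exceed 2 * 1024 * 5000 = ~10 MB, i.e. roughly
+ * twice the space the live texts should need, before a GC cycle is
+ * judged worthwhile.
+ */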
+
+/*
+ * Garbage-collect orphaned query texts in external file.
+ *
+ * This won't be called often in the typical case, since it's likely that
+ * there won't be too much churn, and besides, a similar compaction process
+ * occurs when serializing to disk at shutdown or as part of resetting.
+ * Despite this, it seems prudent to plan for the edge case where the file
+ * becomes unreasonably large, with no other method of compaction likely to
+ * occur in the foreseeable future.
+ *
+ * The caller must hold an exclusive lock on pgss->lock.
+ *
+ * At the first sign of trouble we unlink the query text file to get a clean
+ * slate (although existing statistics are retained), rather than risk
+ * thrashing by allowing the same problem case to recur indefinitely.
+ */
+static void
+gc_qtexts(void)
+{
+ char *qbuffer;
+ Size qbuffer_size;
+ FILE *qfile = NULL;
+ HASH_SEQ_STATUS hash_seq;
+ pgssEntry *entry;
+ Size extent;
+ int nentries;
+
+ /*
+ * When called from pgss_store, some other session might have proceeded
+ * with garbage collection in the no-lock-held interim of lock strength
+ * escalation. Check once more that this is actually necessary.
+ */
+ if (!need_gc_qtexts())
+ return;
+
+ /*
+ * Load the old texts file. If we fail (out of memory, for instance),
+ * invalidate query texts. Hopefully this is rare. It might seem better
+ * to leave things alone on an OOM failure, but the problem is that the
+ * file is only going to get bigger; hoping for a future non-OOM result is
+ * risky and can easily lead to complete denial of service.
+ */
+ qbuffer = qtext_load_file(&qbuffer_size);
+ if (qbuffer == NULL)
+ goto gc_fail;
+
+ /*
+ * We overwrite the query texts file in place, so as to reduce the risk of
+ * an out-of-disk-space failure. Since the file is guaranteed not to get
+ * larger, this should always work on traditional filesystems; though we
+ * could still lose on copy-on-write filesystems.
+ */
+ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
+ if (qfile == NULL)
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ goto gc_fail;
+ }
+
+ extent = 0;
+ nentries = 0;
+
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ int query_len = entry->query_len;
+ char *qry = qtext_fetch(entry->query_offset,
+ query_len,
+ qbuffer,
+ qbuffer_size);
+
+ if (qry == NULL)
+ {
+ /* Trouble ... drop the text */
+ entry->query_offset = 0;
+ entry->query_len = -1;
+ /* entry will not be counted in mean query length computation */
+ continue;
+ }
+
+ if (fwrite(qry, 1, query_len + 1, qfile) != query_len + 1)
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ hash_seq_term(&hash_seq);
+ goto gc_fail;
+ }
+
+ entry->query_offset = extent;
+ extent += query_len + 1;
+ nentries++;
+ }
+
+ /*
+ * Truncate away any now-unused space. If this fails for some odd reason,
+ * we log it, but there's no need to fail.
+ */
+ if (ftruncate(fileno(qfile), extent) != 0)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not truncate file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+
+ if (FreeFile(qfile))
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ qfile = NULL;
+ goto gc_fail;
+ }
+
+ elog(DEBUG1, "pgss gc of queries file shrunk size from %zu to %zu",
+ pgss->extent, extent);
+
+ /* Reset the shared extent pointer */
+ pgss->extent = extent;
+
+ /*
+ * Also update the mean query length, to be sure that need_gc_qtexts()
+ * won't still think we have a problem.
+ */
+ if (nentries > 0)
+ pgss->mean_query_len = extent / nentries;
+ else
+ pgss->mean_query_len = ASSUMED_LENGTH_INIT;
+
+ free(qbuffer);
+
+ /*
+ * OK, count a garbage collection cycle. (Note: even though we have
+ * exclusive lock on pgss->lock, we must take pgss->mutex for this, since
+ * other processes may examine gc_count while holding only the mutex.
+ * Also, we have to advance the count *after* we've rewritten the file,
+ * else other processes might not realize they read a stale file.)
+ */
+ record_gc_qtexts();
+
+ return;
+
+gc_fail:
+ /* clean up resources */
+ if (qfile)
+ FreeFile(qfile);
+ if (qbuffer)
+ free(qbuffer);
+
+ /*
+ * Since the contents of the external file are now uncertain, mark all
+ * hashtable entries as having invalid texts.
+ */
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ entry->query_offset = 0;
+ entry->query_len = -1;
+ }
+
+ /*
+ * Destroy the query text file and create a new, empty one
+ */
+ (void) unlink(PGSS_TEXT_FILE);
+ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
+ if (qfile == NULL)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not recreate file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ else
+ FreeFile(qfile);
+
+ /* Reset the shared extent pointer */
+ pgss->extent = 0;
+
+ /* Reset mean_query_len to match the new state */
+ pgss->mean_query_len = ASSUMED_LENGTH_INIT;
+
+ /*
+ * Bump the GC count even though we failed.
+ *
+	 * This is needed to make concurrent readers of the file, who hold no
+	 * lock on pgss->lock, notice that a new version of the file exists.
+	 * Once readers subsequently observe a change in the GC count with
+	 * pgss->lock held, that forces a safe reopen of the file.  Writers also
+	 * require that we bump here, of course.  (As required by the locking
+	 * protocol, readers and writers don't trust earlier file contents until
+	 * gc_count is found unchanged after pgss->lock has been acquired in
+	 * shared or exclusive mode respectively.)
+ */
+ record_gc_qtexts();
+}
+
+/*
+ * Release entries corresponding to parameters passed.
+ */
+static void
+entry_reset(Oid userid, Oid dbid, uint64 queryid)
+{
+ HASH_SEQ_STATUS hash_seq;
+ pgssEntry *entry;
+ FILE *qfile;
+ long num_entries;
+ long num_remove = 0;
+ pgssHashKey key;
+
+ if (!pgss || !pgss_hash)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
+
+ LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
+ num_entries = hash_get_num_entries(pgss_hash);
+
+ if (userid != 0 && dbid != 0 && queryid != UINT64CONST(0))
+ {
+ /* If all the parameters are available, use the fast path. */
+ key.userid = userid;
+ key.dbid = dbid;
+ key.queryid = queryid;
+
+		/* Remove the entry if it exists */
+ entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_REMOVE, NULL);
+ if (entry) /* found */
+ num_remove++;
+ }
+ else if (userid != 0 || dbid != 0 || queryid != UINT64CONST(0))
+ {
+ /* Remove entries corresponding to valid parameters. */
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ if ((!userid || entry->key.userid == userid) &&
+ (!dbid || entry->key.dbid == dbid) &&
+ (!queryid || entry->key.queryid == queryid))
+ {
+ hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL);
+ num_remove++;
+ }
+ }
+ }
+ else
+ {
+ /* Remove all entries. */
+ hash_seq_init(&hash_seq, pgss_hash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL);
+ num_remove++;
+ }
+ }
+
+	/* If not all entries were removed, keep the query text file as-is */
+ if (num_entries != num_remove)
+ goto release_lock;
+
+ /*
+	 * Write a new, empty query text file, creating it anew to recover in
+	 * case the file was missing.
+ */
+ qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W);
+ if (qfile == NULL)
+ {
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not create file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+ goto done;
+ }
+
+ /* If ftruncate fails, log it, but it's not a fatal problem */
+ if (ftruncate(fileno(qfile), 0) != 0)
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not truncate file \"%s\": %m",
+ PGSS_TEXT_FILE)));
+
+ FreeFile(qfile);
+
+done:
+ pgss->extent = 0;
+ /* This counts as a query text garbage collection for our purposes */
+ record_gc_qtexts();
+
+release_lock:
+ LWLockRelease(pgss->lock);
+}
+
+/*
+ * AppendJumble: Append a value that is substantive in a given query to
+ * the current jumble.
+ */
+static void
+AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size)
+{
+ unsigned char *jumble = jstate->jumble;
+ Size jumble_len = jstate->jumble_len;
+
+ /*
+ * Whenever the jumble buffer is full, we hash the current contents and
+ * reset the buffer to contain just that hash value, thus relying on the
+ * hash to summarize everything so far.
+ */
+ while (size > 0)
+ {
+ Size part_size;
+
+ if (jumble_len >= JUMBLE_SIZE)
+ {
+ uint64 start_hash;
+
+ start_hash = DatumGetUInt64(hash_any_extended(jumble,
+ JUMBLE_SIZE, 0));
+ memcpy(jumble, &start_hash, sizeof(start_hash));
+ jumble_len = sizeof(start_hash);
+ }
+ part_size = Min(size, JUMBLE_SIZE - jumble_len);
+ memcpy(jumble + jumble_len, item, part_size);
+ jumble_len += part_size;
+ item += part_size;
+ size -= part_size;
+ }
+ jstate->jumble_len = jumble_len;
+}
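+
+/*
+ * Illustrative trace of the folding above, assuming the customary
+ * JUMBLE_SIZE of 1024 bytes: once 1024 bytes have accumulated, the next
+ * append hashes them down to an 8-byte seed, leaving 1016 bytes of room.
+ * An arbitrarily long stream of appended items therefore never needs
+ * more than the fixed 1024-byte buffer, at the cost of incremental
+ * rehashing.
+ */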
+
+/*
+ * Wrappers around AppendJumble to encapsulate details of serialization
+ * of individual local variable elements.
+ */
+#define APP_JUMB(item) \
+ AppendJumble(jstate, (const unsigned char *) &(item), sizeof(item))
+#define APP_JUMB_STRING(str) \
+ AppendJumble(jstate, (const unsigned char *) (str), strlen(str) + 1)
+
+/*
+ * JumbleQuery: Selectively serialize the query tree, appending significant
+ * data to the "query jumble" while ignoring nonsignificant data.
+ *
+ * Rule of thumb for what to include is that we should ignore anything not
+ * semantically significant (such as alias names) as well as anything that can
+ * be deduced from child nodes (else we'd just be double-hashing that piece
+ * of information).
+ */
+static void
+JumbleQuery(pgssJumbleState *jstate, Query *query)
+{
+ Assert(IsA(query, Query));
+ Assert(query->utilityStmt == NULL);
+
+ APP_JUMB(query->commandType);
+ /* resultRelation is usually predictable from commandType */
+ JumbleExpr(jstate, (Node *) query->cteList);
+ JumbleRangeTable(jstate, query->rtable);
+ JumbleExpr(jstate, (Node *) query->jointree);
+ JumbleExpr(jstate, (Node *) query->targetList);
+ JumbleExpr(jstate, (Node *) query->onConflict);
+ JumbleExpr(jstate, (Node *) query->returningList);
+ JumbleExpr(jstate, (Node *) query->groupClause);
+ JumbleExpr(jstate, (Node *) query->groupingSets);
+ JumbleExpr(jstate, query->havingQual);
+ JumbleExpr(jstate, (Node *) query->windowClause);
+ JumbleExpr(jstate, (Node *) query->distinctClause);
+ JumbleExpr(jstate, (Node *) query->sortClause);
+ JumbleExpr(jstate, query->limitOffset);
+ JumbleExpr(jstate, query->limitCount);
+ JumbleRowMarks(jstate, query->rowMarks);
+ JumbleExpr(jstate, query->setOperations);
+}
+
+/*
+ * Jumble a range table
+ */
+static void
+JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
+{
+ ListCell *lc;
+
+ foreach(lc, rtable)
+ {
+ RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);
+
+ APP_JUMB(rte->rtekind);
+ switch (rte->rtekind)
+ {
+ case RTE_RELATION:
+ APP_JUMB(rte->relid);
+ JumbleExpr(jstate, (Node *) rte->tablesample);
+ break;
+ case RTE_SUBQUERY:
+ JumbleQuery(jstate, rte->subquery);
+ break;
+ case RTE_JOIN:
+ APP_JUMB(rte->jointype);
+ break;
+ case RTE_FUNCTION:
+ JumbleExpr(jstate, (Node *) rte->functions);
+ break;
+ case RTE_TABLEFUNC:
+ JumbleExpr(jstate, (Node *) rte->tablefunc);
+ break;
+ case RTE_VALUES:
+ JumbleExpr(jstate, (Node *) rte->values_lists);
+ break;
+ case RTE_CTE:
+
+ /*
+ * Depending on the CTE name here isn't ideal, but it's the
+ * only info we have to identify the referenced WITH item.
+ */
+ APP_JUMB_STRING(rte->ctename);
+ APP_JUMB(rte->ctelevelsup);
+ break;
+ case RTE_NAMEDTUPLESTORE:
+ APP_JUMB_STRING(rte->enrname);
+ break;
+ case RTE_RESULT:
+ break;
+ default:
+ elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind);
+ break;
+ }
+ }
+}
+
+/*
+ * Jumble a rowMarks list
+ */
+static void
+JumbleRowMarks(pgssJumbleState *jstate, List *rowMarks)
+{
+ ListCell *lc;
+
+ foreach(lc, rowMarks)
+ {
+ RowMarkClause *rowmark = lfirst_node(RowMarkClause, lc);
+
+ if (!rowmark->pushedDown)
+ {
+ APP_JUMB(rowmark->rti);
+ APP_JUMB(rowmark->strength);
+ APP_JUMB(rowmark->waitPolicy);
+ }
+ }
+}
+
+/*
+ * Jumble an expression tree
+ *
+ * In general this function should handle all the same node types that
+ * expression_tree_walker() does, and therefore it's coded to be as parallel
+ * to that function as possible. However, since we are only invoked on
+ * queries immediately post-parse-analysis, we need not handle node types
+ * that only appear in planning.
+ *
+ * Note: the reason we don't simply use expression_tree_walker() is that the
+ * point of that function is to support tree walkers that don't care about
+ * most tree node types, but here we care about all types. We should complain
+ * about any unrecognized node type.
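+ *
+ * For example, for an expression like "a = 1" we emit the OpExpr's NodeTag
+ * and operator OID, then recurse to the Var (emitting varno, varattno and
+ * varlevelsup) and to the Const (emitting only its type, while recording
+ * its location for normalization).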
+ */
+static void
+JumbleExpr(pgssJumbleState *jstate, Node *node)
+{
+ ListCell *temp;
+
+ if (node == NULL)
+ return;
+
+ /* Guard against stack overflow due to overly complex expressions */
+ check_stack_depth();
+
+ /*
+ * We always emit the node's NodeTag, then any additional fields that are
+ * considered significant, and then we recurse to any child nodes.
+ */
+ APP_JUMB(node->type);
+
+ switch (nodeTag(node))
+ {
+ case T_Var:
+ {
+ Var *var = (Var *) node;
+
+ APP_JUMB(var->varno);
+ APP_JUMB(var->varattno);
+ APP_JUMB(var->varlevelsup);
+ }
+ break;
+ case T_Const:
+ {
+ Const *c = (Const *) node;
+
+ /* We jumble only the constant's type, not its value */
+ APP_JUMB(c->consttype);
+ /* Also, record its parse location for query normalization */
+ RecordConstLocation(jstate, c->location);
+ }
+ break;
+ case T_Param:
+ {
+ Param *p = (Param *) node;
+
+ APP_JUMB(p->paramkind);
+ APP_JUMB(p->paramid);
+ APP_JUMB(p->paramtype);
+ /* Also, track the highest external Param id */
+ if (p->paramkind == PARAM_EXTERN &&
+ p->paramid > jstate->highest_extern_param_id)
+ jstate->highest_extern_param_id = p->paramid;
+ }
+ break;
+ case T_Aggref:
+ {
+ Aggref *expr = (Aggref *) node;
+
+ APP_JUMB(expr->aggfnoid);
+ JumbleExpr(jstate, (Node *) expr->aggdirectargs);
+ JumbleExpr(jstate, (Node *) expr->args);
+ JumbleExpr(jstate, (Node *) expr->aggorder);
+ JumbleExpr(jstate, (Node *) expr->aggdistinct);
+ JumbleExpr(jstate, (Node *) expr->aggfilter);
+ }
+ break;
+ case T_GroupingFunc:
+ {
+ GroupingFunc *grpnode = (GroupingFunc *) node;
+
+ JumbleExpr(jstate, (Node *) grpnode->refs);
+ }
+ break;
+ case T_WindowFunc:
+ {
+ WindowFunc *expr = (WindowFunc *) node;
+
+ APP_JUMB(expr->winfnoid);
+ APP_JUMB(expr->winref);
+ JumbleExpr(jstate, (Node *) expr->args);
+ JumbleExpr(jstate, (Node *) expr->aggfilter);
+ }
+ break;
+ case T_SubscriptingRef:
+ {
+ SubscriptingRef *sbsref = (SubscriptingRef *) node;
+
+ JumbleExpr(jstate, (Node *) sbsref->refupperindexpr);
+ JumbleExpr(jstate, (Node *) sbsref->reflowerindexpr);
+ JumbleExpr(jstate, (Node *) sbsref->refexpr);
+ JumbleExpr(jstate, (Node *) sbsref->refassgnexpr);
+ }
+ break;
+ case T_FuncExpr:
+ {
+ FuncExpr *expr = (FuncExpr *) node;
+
+ APP_JUMB(expr->funcid);
+ JumbleExpr(jstate, (Node *) expr->args);
+ }
+ break;
+ case T_NamedArgExpr:
+ {
+ NamedArgExpr *nae = (NamedArgExpr *) node;
+
+ APP_JUMB(nae->argnumber);
+ JumbleExpr(jstate, (Node *) nae->arg);
+ }
+ break;
+ case T_OpExpr:
+ case T_DistinctExpr: /* struct-equivalent to OpExpr */
+ case T_NullIfExpr: /* struct-equivalent to OpExpr */
+ {
+ OpExpr *expr = (OpExpr *) node;
+
+ APP_JUMB(expr->opno);
+ JumbleExpr(jstate, (Node *) expr->args);
+ }
+ break;
+ case T_ScalarArrayOpExpr:
+ {
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+
+ APP_JUMB(expr->opno);
+ APP_JUMB(expr->useOr);
+ JumbleExpr(jstate, (Node *) expr->args);
+ }
+ break;
+ case T_BoolExpr:
+ {
+ BoolExpr *expr = (BoolExpr *) node;
+
+ APP_JUMB(expr->boolop);
+ JumbleExpr(jstate, (Node *) expr->args);
+ }
+ break;
+ case T_SubLink:
+ {
+ SubLink *sublink = (SubLink *) node;
+
+ APP_JUMB(sublink->subLinkType);
+ APP_JUMB(sublink->subLinkId);
+ JumbleExpr(jstate, (Node *) sublink->testexpr);
+ JumbleQuery(jstate, castNode(Query, sublink->subselect));
+ }
+ break;
+ case T_FieldSelect:
+ {
+ FieldSelect *fs = (FieldSelect *) node;
+
+ APP_JUMB(fs->fieldnum);
+ JumbleExpr(jstate, (Node *) fs->arg);
+ }
+ break;
+ case T_FieldStore:
+ {
+ FieldStore *fstore = (FieldStore *) node;
+
+ JumbleExpr(jstate, (Node *) fstore->arg);
+ JumbleExpr(jstate, (Node *) fstore->newvals);
+ }
+ break;
+ case T_RelabelType:
+ {
+ RelabelType *rt = (RelabelType *) node;
+
+ APP_JUMB(rt->resulttype);
+ JumbleExpr(jstate, (Node *) rt->arg);
+ }
+ break;
+ case T_CoerceViaIO:
+ {
+ CoerceViaIO *cio = (CoerceViaIO *) node;
+
+ APP_JUMB(cio->resulttype);
+ JumbleExpr(jstate, (Node *) cio->arg);
+ }
+ break;
+ case T_ArrayCoerceExpr:
+ {
+ ArrayCoerceExpr *acexpr = (ArrayCoerceExpr *) node;
+
+ APP_JUMB(acexpr->resulttype);
+ JumbleExpr(jstate, (Node *) acexpr->arg);
+ JumbleExpr(jstate, (Node *) acexpr->elemexpr);
+ }
+ break;
+ case T_ConvertRowtypeExpr:
+ {
+ ConvertRowtypeExpr *crexpr = (ConvertRowtypeExpr *) node;
+
+ APP_JUMB(crexpr->resulttype);
+ JumbleExpr(jstate, (Node *) crexpr->arg);
+ }
+ break;
+ case T_CollateExpr:
+ {
+ CollateExpr *ce = (CollateExpr *) node;
+
+ APP_JUMB(ce->collOid);
+ JumbleExpr(jstate, (Node *) ce->arg);
+ }
+ break;
+ case T_CaseExpr:
+ {
+ CaseExpr *caseexpr = (CaseExpr *) node;
+
+ JumbleExpr(jstate, (Node *) caseexpr->arg);
+ foreach(temp, caseexpr->args)
+ {
+ CaseWhen *when = lfirst_node(CaseWhen, temp);
+
+ JumbleExpr(jstate, (Node *) when->expr);
+ JumbleExpr(jstate, (Node *) when->result);
+ }
+ JumbleExpr(jstate, (Node *) caseexpr->defresult);
+ }
+ break;
+ case T_CaseTestExpr:
+ {
+ CaseTestExpr *ct = (CaseTestExpr *) node;
+
+ APP_JUMB(ct->typeId);
+ }
+ break;
+ case T_ArrayExpr:
+ JumbleExpr(jstate, (Node *) ((ArrayExpr *) node)->elements);
+ break;
+ case T_RowExpr:
+ JumbleExpr(jstate, (Node *) ((RowExpr *) node)->args);
+ break;
+ case T_RowCompareExpr:
+ {
+ RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+
+ APP_JUMB(rcexpr->rctype);
+ JumbleExpr(jstate, (Node *) rcexpr->largs);
+ JumbleExpr(jstate, (Node *) rcexpr->rargs);
+ }
+ break;
+ case T_CoalesceExpr:
+ JumbleExpr(jstate, (Node *) ((CoalesceExpr *) node)->args);
+ break;
+ case T_MinMaxExpr:
+ {
+ MinMaxExpr *mmexpr = (MinMaxExpr *) node;
+
+ APP_JUMB(mmexpr->op);
+ JumbleExpr(jstate, (Node *) mmexpr->args);
+ }
+ break;
+ case T_SQLValueFunction:
+ {
+ SQLValueFunction *svf = (SQLValueFunction *) node;
+
+ APP_JUMB(svf->op);
+ /* type is fully determined by op */
+ APP_JUMB(svf->typmod);
+ }
+ break;
+ case T_XmlExpr:
+ {
+ XmlExpr *xexpr = (XmlExpr *) node;
+
+ APP_JUMB(xexpr->op);
+ JumbleExpr(jstate, (Node *) xexpr->named_args);
+ JumbleExpr(jstate, (Node *) xexpr->args);
+ }
+ break;
+ case T_NullTest:
+ {
+ NullTest *nt = (NullTest *) node;
+
+ APP_JUMB(nt->nulltesttype);
+ JumbleExpr(jstate, (Node *) nt->arg);
+ }
+ break;
+ case T_BooleanTest:
+ {
+ BooleanTest *bt = (BooleanTest *) node;
+
+ APP_JUMB(bt->booltesttype);
+ JumbleExpr(jstate, (Node *) bt->arg);
+ }
+ break;
+ case T_CoerceToDomain:
+ {
+ CoerceToDomain *cd = (CoerceToDomain *) node;
+
+ APP_JUMB(cd->resulttype);
+ JumbleExpr(jstate, (Node *) cd->arg);
+ }
+ break;
+ case T_CoerceToDomainValue:
+ {
+ CoerceToDomainValue *cdv = (CoerceToDomainValue *) node;
+
+ APP_JUMB(cdv->typeId);
+ }
+ break;
+ case T_SetToDefault:
+ {
+ SetToDefault *sd = (SetToDefault *) node;
+
+ APP_JUMB(sd->typeId);
+ }
+ break;
+ case T_CurrentOfExpr:
+ {
+ CurrentOfExpr *ce = (CurrentOfExpr *) node;
+
+ APP_JUMB(ce->cvarno);
+ if (ce->cursor_name)
+ APP_JUMB_STRING(ce->cursor_name);
+ APP_JUMB(ce->cursor_param);
+ }
+ break;
+ case T_NextValueExpr:
+ {
+ NextValueExpr *nve = (NextValueExpr *) node;
+
+ APP_JUMB(nve->seqid);
+ APP_JUMB(nve->typeId);
+ }
+ break;
+ case T_InferenceElem:
+ {
+ InferenceElem *ie = (InferenceElem *) node;
+
+ APP_JUMB(ie->infercollid);
+ APP_JUMB(ie->inferopclass);
+ JumbleExpr(jstate, ie->expr);
+ }
+ break;
+ case T_TargetEntry:
+ {
+ TargetEntry *tle = (TargetEntry *) node;
+
+ APP_JUMB(tle->resno);
+ APP_JUMB(tle->ressortgroupref);
+ JumbleExpr(jstate, (Node *) tle->expr);
+ }
+ break;
+ case T_RangeTblRef:
+ {
+ RangeTblRef *rtr = (RangeTblRef *) node;
+
+ APP_JUMB(rtr->rtindex);
+ }
+ break;
+ case T_JoinExpr:
+ {
+ JoinExpr *join = (JoinExpr *) node;
+
+ APP_JUMB(join->jointype);
+ APP_JUMB(join->isNatural);
+ APP_JUMB(join->rtindex);
+ JumbleExpr(jstate, join->larg);
+ JumbleExpr(jstate, join->rarg);
+ JumbleExpr(jstate, join->quals);
+ }
+ break;
+ case T_FromExpr:
+ {
+ FromExpr *from = (FromExpr *) node;
+
+ JumbleExpr(jstate, (Node *) from->fromlist);
+ JumbleExpr(jstate, from->quals);
+ }
+ break;
+ case T_OnConflictExpr:
+ {
+ OnConflictExpr *conf = (OnConflictExpr *) node;
+
+ APP_JUMB(conf->action);
+ JumbleExpr(jstate, (Node *) conf->arbiterElems);
+ JumbleExpr(jstate, conf->arbiterWhere);
+ JumbleExpr(jstate, (Node *) conf->onConflictSet);
+ JumbleExpr(jstate, conf->onConflictWhere);
+ APP_JUMB(conf->constraint);
+ APP_JUMB(conf->exclRelIndex);
+ JumbleExpr(jstate, (Node *) conf->exclRelTlist);
+ }
+ break;
+ case T_List:
+ foreach(temp, (List *) node)
+ {
+ JumbleExpr(jstate, (Node *) lfirst(temp));
+ }
+ break;
+ case T_IntList:
+ foreach(temp, (List *) node)
+ {
+ APP_JUMB(lfirst_int(temp));
+ }
+ break;
+ case T_SortGroupClause:
+ {
+ SortGroupClause *sgc = (SortGroupClause *) node;
+
+ APP_JUMB(sgc->tleSortGroupRef);
+ APP_JUMB(sgc->eqop);
+ APP_JUMB(sgc->sortop);
+ APP_JUMB(sgc->nulls_first);
+ }
+ break;
+ case T_GroupingSet:
+ {
+ GroupingSet *gsnode = (GroupingSet *) node;
+
+ JumbleExpr(jstate, (Node *) gsnode->content);
+ }
+ break;
+ case T_WindowClause:
+ {
+ WindowClause *wc = (WindowClause *) node;
+
+ APP_JUMB(wc->winref);
+ APP_JUMB(wc->frameOptions);
+ JumbleExpr(jstate, (Node *) wc->partitionClause);
+ JumbleExpr(jstate, (Node *) wc->orderClause);
+ JumbleExpr(jstate, wc->startOffset);
+ JumbleExpr(jstate, wc->endOffset);
+ }
+ break;
+ case T_CommonTableExpr:
+ {
+ CommonTableExpr *cte = (CommonTableExpr *) node;
+
+ /* we store the string name because RTE_CTE RTEs need it */
+ APP_JUMB_STRING(cte->ctename);
+ APP_JUMB(cte->ctematerialized);
+ JumbleQuery(jstate, castNode(Query, cte->ctequery));
+ }
+ break;
+ case T_SetOperationStmt:
+ {
+ SetOperationStmt *setop = (SetOperationStmt *) node;
+
+ APP_JUMB(setop->op);
+ APP_JUMB(setop->all);
+ JumbleExpr(jstate, setop->larg);
+ JumbleExpr(jstate, setop->rarg);
+ }
+ break;
+ case T_RangeTblFunction:
+ {
+ RangeTblFunction *rtfunc = (RangeTblFunction *) node;
+
+ JumbleExpr(jstate, rtfunc->funcexpr);
+ }
+ break;
+ case T_TableFunc:
+ {
+ TableFunc *tablefunc = (TableFunc *) node;
+
+ JumbleExpr(jstate, tablefunc->docexpr);
+ JumbleExpr(jstate, tablefunc->rowexpr);
+ JumbleExpr(jstate, (Node *) tablefunc->colexprs);
+ }
+ break;
+ case T_TableSampleClause:
+ {
+ TableSampleClause *tsc = (TableSampleClause *) node;
+
+ APP_JUMB(tsc->tsmhandler);
+ JumbleExpr(jstate, (Node *) tsc->args);
+ JumbleExpr(jstate, (Node *) tsc->repeatable);
+ }
+ break;
+ default:
+ /* Only a warning, since we can stumble along anyway */
+ elog(WARNING, "unrecognized node type: %d",
+ (int) nodeTag(node));
+ break;
+ }
+}
+
+/*
+ * Record location of constant within query string of query tree
+ * that is currently being walked.
+ */
+static void
+RecordConstLocation(pgssJumbleState *jstate, int location)
+{
+ /* -1 indicates unknown or undefined location */
+ if (location >= 0)
+ {
+ /* enlarge array if needed */
+ if (jstate->clocations_count >= jstate->clocations_buf_size)
+ {
+ jstate->clocations_buf_size *= 2;
+ jstate->clocations = (pgssLocationLen *)
+ repalloc(jstate->clocations,
+ jstate->clocations_buf_size *
+ sizeof(pgssLocationLen));
+ }
+ jstate->clocations[jstate->clocations_count].location = location;
+ /* initialize lengths to -1 to simplify fill_in_constant_lengths */
+ jstate->clocations[jstate->clocations_count].length = -1;
+ jstate->clocations_count++;
+ }
+}
+
+/*
+ * Generate a normalized version of the query string that will be used to
+ * represent all similar queries.
+ *
+ * Note that the normalized representation may well vary depending on
+ * just which "equivalent" query is used to create the hashtable entry.
+ * We assume this is OK.
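+ *
+ * For example (values illustrative, not taken from any particular query):
+ * with two recorded constant locations and highest_extern_param_id = 0,
+ *		SELECT * FROM foo WHERE bar = 1 AND baz = 'x'
+ * would be rewritten as
+ *		SELECT * FROM foo WHERE bar = $1 AND baz = $2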
+ *
+ * If query_loc > 0, then "query" has been advanced by that much compared to
+ * the original string start, so we need to translate the provided locations
+ * to compensate. (This lets us avoid re-scanning statements before the one
+ * of interest, so it's worth doing.)
+ *
+ * *query_len_p contains the input string length, and is updated with
+ * the result string length on exit. The resulting string might be longer
+ * or shorter depending on what happens with replacement of constants.
+ *
+ * Returns a palloc'd string.
+ */
+static char *
+generate_normalized_query(pgssJumbleState *jstate, const char *query,
+ int query_loc, int *query_len_p, int encoding)
+{
+ char *norm_query;
+ int query_len = *query_len_p;
+ int i,
+ norm_query_buflen, /* Space allowed for norm_query */
+ len_to_wrt, /* Length (in bytes) to write */
+ quer_loc = 0, /* Source query byte location */
+ n_quer_loc = 0, /* Normalized query byte location */
+ last_off = 0, /* Offset from start for previous tok */
+ last_tok_len = 0; /* Length (in bytes) of that tok */
+
+ /*
+ * Get constants' lengths (core system only gives us locations). Note
+ * this also ensures the items are sorted by location.
+ */
+ fill_in_constant_lengths(jstate, query, query_loc);
+
+	/*
+	 * Allow for $n symbols to be longer than the constants they replace.
+	 * Constants must take at least one byte in text form, while a $n symbol
+	 * certainly isn't more than 11 bytes ("$" plus the ten digits of
+	 * INT_MAX), so each replacement grows the string by at most 10 bytes.
+	 * We could refine that limit based on the max value of n for the
+	 * current query, but it hardly seems worth any extra effort to do so.
+	 */
+ norm_query_buflen = query_len + jstate->clocations_count * 10;
+
+ /* Allocate result buffer */
+ norm_query = palloc(norm_query_buflen + 1);
+
+ for (i = 0; i < jstate->clocations_count; i++)
+ {
+ int off, /* Offset from start for cur tok */
+ tok_len; /* Length (in bytes) of that tok */
+
+ off = jstate->clocations[i].location;
+ /* Adjust recorded location if we're dealing with partial string */
+ off -= query_loc;
+
+ tok_len = jstate->clocations[i].length;
+
+ if (tok_len < 0)
+ continue; /* ignore any duplicates */
+
+ /* Copy next chunk (what precedes the next constant) */
+ len_to_wrt = off - last_off;
+ len_to_wrt -= last_tok_len;
+
+ Assert(len_to_wrt >= 0);
+ memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
+ n_quer_loc += len_to_wrt;
+
+ /* And insert a param symbol in place of the constant token */
+ n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d",
+ i + 1 + jstate->highest_extern_param_id);
+
+ quer_loc = off + tok_len;
+ last_off = off;
+ last_tok_len = tok_len;
+ }
+
+ /*
+ * We've copied up until the last ignorable constant. Copy over the
+ * remaining bytes of the original query string.
+ */
+ len_to_wrt = query_len - quer_loc;
+
+ Assert(len_to_wrt >= 0);
+ memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
+ n_quer_loc += len_to_wrt;
+
+ Assert(n_quer_loc <= norm_query_buflen);
+ norm_query[n_quer_loc] = '\0';
+
+ *query_len_p = n_quer_loc;
+ return norm_query;
+}
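+
+/*
+ * A hedged usage sketch (illustrative only: the guard symbol and helper
+ * name are hypothetical, so the sketch is normally compiled out).  Here a
+ * complete query string is normalized, so query_loc is 0 and no location
+ * translation is needed.
+ */
+#ifdef PGSS_NORMALIZE_EXAMPLE
+static char *
+normalize_example(pgssJumbleState *jstate, const char *query_text)
+{
+	int			query_len = strlen(query_text);
+
+	/* query_loc = 0: locations are relative to the start of the string */
+	return generate_normalized_query(jstate, query_text, 0, &query_len,
+									 GetDatabaseEncoding());
+}
+#endif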
+
+/*
+ * Given a valid SQL string and an array of constant-location records,
+ * fill in the textual lengths of those constants.
+ *
+ * The constants may use any allowed constant syntax, such as float literals,
+ * bit-strings, single-quoted strings and dollar-quoted strings. This is
+ * accomplished by using the public API for the core scanner.
+ *
+ * It is the caller's job to ensure that the string is a valid SQL statement
+ * with constants at the indicated locations. Since in practice the string
+ * has already been parsed, and the locations that the caller provides will
+ * have originated from within the authoritative parser, this should not be
+ * a problem.
+ *
+ * Duplicate constant locations are possible, and will have their lengths
+ * left as '-1', so that they are later ignored.  (We assume the lengths
+ * were initialized to -1 to start with, and simply don't change them here.)
+ *
+ * If query_loc > 0, then "query" has been advanced by that much compared to
+ * the original string start, so we need to translate the provided locations
+ * to compensate. (This lets us avoid re-scanning statements before the one
+ * of interest, so it's worth doing.)
+ *
+ * N.B. There is an assumption that a '-' character at a Const location begins
+ * a negative numeric constant. This precludes there ever being another
+ * reason for a constant to start with a '-'.
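+ *
+ * For example, in "WHERE x = -5" the recorded location points at the '-',
+ * and the length computed here covers both the minus and the digits, so
+ * the whole "-5" is later replaced by a single $n symbol.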
+ */
+static void
+fill_in_constant_lengths(pgssJumbleState *jstate, const char *query,
+ int query_loc)
+{
+ pgssLocationLen *locs;
+ core_yyscan_t yyscanner;
+ core_yy_extra_type yyextra;
+ core_YYSTYPE yylval;
+ YYLTYPE yylloc;
+ int last_loc = -1;
+ int i;
+
+ /*
+ * Sort the records by location so that we can process them in order while
+ * scanning the query text.
+ */
+ if (jstate->clocations_count > 1)
+ qsort(jstate->clocations, jstate->clocations_count,
+ sizeof(pgssLocationLen), comp_location);
+ locs = jstate->clocations;
+
+ /* initialize the flex scanner --- should match raw_parser() */
+ yyscanner = scanner_init(query,
+ &yyextra,
+ &ScanKeywords,
+ ScanKeywordTokens);
+
+ /* we don't want to re-emit any escape string warnings */
+ yyextra.escape_string_warning = false;
+
+ /* Search for each constant, in sequence */
+ for (i = 0; i < jstate->clocations_count; i++)
+ {
+ int loc = locs[i].location;
+ int tok;
+
+ /* Adjust recorded location if we're dealing with partial string */
+ loc -= query_loc;
+
+ Assert(loc >= 0);
+
+ if (loc <= last_loc)
+ continue; /* Duplicate constant, ignore */
+
+ /* Lex tokens until we find the desired constant */
+ for (;;)
+ {
+ tok = core_yylex(&yylval, &yylloc, yyscanner);
+
+ /* We should not hit end-of-string, but if we do, behave sanely */
+ if (tok == 0)
+ break; /* out of inner for-loop */
+
+ /*
+ * We should find the token position exactly, but if we somehow
+ * run past it, work with that.
+ */
+ if (yylloc >= loc)
+ {
+ if (query[loc] == '-')
+ {
+ /*
+ * It's a negative value - this is the one and only case
+ * where we replace more than a single token.
+ *
+ * Do not compensate for the core system's special-case
+ * adjustment of location to that of the leading '-'
+ * operator in the event of a negative constant. It is
+ * also useful for our purposes to start from the minus
+ * symbol. In this way, queries like "select * from foo
+ * where bar = 1" and "select * from foo where bar = -2"
+ * will have identical normalized query strings.
+ */
+ tok = core_yylex(&yylval, &yylloc, yyscanner);
+ if (tok == 0)
+ break; /* out of inner for-loop */
+ }
+
+ /*
+ * We now rely on the assumption that flex has placed a zero
+ * byte after the text of the current token in scanbuf.
+ */
+ locs[i].length = strlen(yyextra.scanbuf + loc);
+ break; /* out of inner for-loop */
+ }
+ }
+
+ /* If we hit end-of-string, give up, leaving remaining lengths -1 */
+ if (tok == 0)
+ break;
+
+ last_loc = loc;
+ }
+
+ scanner_finish(yyscanner);
+}
+
+/*
+ * comp_location: comparator for qsorting pgssLocationLen structs by location
+ */
+static int
+comp_location(const void *a, const void *b)
+{
+ int l = ((const pgssLocationLen *) a)->location;
+ int r = ((const pgssLocationLen *) b)->location;
+
+ if (l < r)
+ return -1;
+ else if (l > r)
+ return +1;
+ else
+ return 0;
+}
diff --git a/contrib/pg_stat_statements/pg_stat_statements.conf b/contrib/pg_stat_statements/pg_stat_statements.conf
new file mode 100644
index 0000000..13346e2
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements.conf
@@ -0,0 +1 @@
+shared_preload_libraries = 'pg_stat_statements'
diff --git a/contrib/pg_stat_statements/pg_stat_statements.control b/contrib/pg_stat_statements/pg_stat_statements.control
new file mode 100644
index 0000000..65b18b1
--- /dev/null
+++ b/contrib/pg_stat_statements/pg_stat_statements.control
@@ -0,0 +1,5 @@
+# pg_stat_statements extension
+comment = 'track planning and execution statistics of all SQL statements executed'
+default_version = '1.8'
+module_pathname = '$libdir/pg_stat_statements'
+relocatable = true
diff --git a/contrib/pg_stat_statements/sql/pg_stat_statements.sql b/contrib/pg_stat_statements/sql/pg_stat_statements.sql
new file mode 100644
index 0000000..3344599
--- /dev/null
+++ b/contrib/pg_stat_statements/sql/pg_stat_statements.sql
@@ -0,0 +1,338 @@
+CREATE EXTENSION pg_stat_statements;
+
+--
+-- simple and compound statements
+--
+SET pg_stat_statements.track_utility = FALSE;
+SET pg_stat_statements.track_planning = TRUE;
+SELECT pg_stat_statements_reset();
+
+SELECT 1 AS "int";
+
+SELECT 'hello'
+ -- multiline
+ AS "text";
+
+SELECT 'world' AS "text";
+
+-- transaction
+BEGIN;
+SELECT 1 AS "int";
+SELECT 'hello' AS "text";
+COMMIT;
+
+-- compound transaction
+BEGIN \;
+SELECT 2.0 AS "float" \;
+SELECT 'world' AS "text" \;
+COMMIT;
+
+-- compound with empty statements and spurious leading spacing
+\;\; SELECT 3 + 3 \;\;\; SELECT ' ' || ' !' \;\; SELECT 1 + 4 \;;
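+-- (the statements in a compound share one query string, which should also
+-- exercise the query_loc translation in generate_normalized_query)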
+
+-- non ;-terminated statements
+SELECT 1 + 1 + 1 AS "add" \gset
+SELECT :add + 1 + 1 AS "add" \;
+SELECT :add + 1 + 1 AS "add" \gset
+
+-- set operator
+SELECT 1 AS i UNION SELECT 2 ORDER BY i;
+
+-- ? operator
+select '{"a":1, "b":2}'::jsonb ? 'b';
+
+-- cte
+WITH t(f) AS (
+ VALUES (1.0), (2.0)
+)
+ SELECT f FROM t ORDER BY f;
+
+-- prepared statement with parameter
+PREPARE pgss_test (int) AS SELECT $1, 'test' LIMIT 1;
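+-- the external parameter $1 raises highest_extern_param_id, so the replaced
+-- constants ('test' and the LIMIT) should appear as $2 and $3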
+EXECUTE pgss_test(1);
+DEALLOCATE pgss_test;
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- CRUD: INSERT SELECT UPDATE DELETE on test table
+--
+SELECT pg_stat_statements_reset();
+
+-- utility "create table" should not be shown
+CREATE TEMP TABLE test (a int, b char(20));
+
+INSERT INTO test VALUES(generate_series(1, 10), 'aaa');
+UPDATE test SET b = 'bbb' WHERE a > 7;
+DELETE FROM test WHERE a > 9;
+
+-- explicit transaction
+BEGIN;
+UPDATE test SET b = '111' WHERE a = 1 ;
+COMMIT;
+
+BEGIN \;
+UPDATE test SET b = '222' WHERE a = 2 \;
+COMMIT ;
+
+UPDATE test SET b = '333' WHERE a = 3 \;
+UPDATE test SET b = '444' WHERE a = 4 ;
+
+BEGIN \;
+UPDATE test SET b = '555' WHERE a = 5 \;
+UPDATE test SET b = '666' WHERE a = 6 \;
+COMMIT ;
+
+-- many INSERT values
+INSERT INTO test (a, b) VALUES (1, 'a'), (2, 'b'), (3, 'c');
+
+-- SELECT with constants
+SELECT * FROM test WHERE a > 5 ORDER BY a ;
+
+SELECT *
+ FROM test
+ WHERE a > 9
+ ORDER BY a ;
+
+-- SELECT without constants
+SELECT * FROM test ORDER BY a;
+
+-- SELECT with IN clause
+SELECT * FROM test WHERE a IN (1, 2, 3, 4, 5);
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- INSERT, UPDATE, DELETE on test table to validate WAL generation metrics
+--
+SELECT pg_stat_statements_reset();
+
+-- utility "create table" should not be shown
+CREATE TABLE pgss_test (a int, b char(20));
+
+INSERT INTO pgss_test VALUES(generate_series(1, 10), 'aaa');
+UPDATE pgss_test SET b = 'bbb' WHERE a > 7;
+DELETE FROM pgss_test WHERE a > 9;
+-- DROP test table
+SET pg_stat_statements.track_utility = TRUE;
+DROP TABLE pgss_test;
+SET pg_stat_statements.track_utility = FALSE;
+
+-- Check WAL is generated for the above statements
+SELECT query, calls, rows,
+wal_bytes > 0 as wal_bytes_generated,
+wal_records > 0 as wal_records_generated,
+wal_records = rows as wal_records_as_rows
+FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- pg_stat_statements.track = none
+--
+SET pg_stat_statements.track = 'none';
+SELECT pg_stat_statements_reset();
+
+SELECT 1 AS "one";
+SELECT 1 + 1 AS "two";
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- pg_stat_statements.track = top
+--
+SET pg_stat_statements.track = 'top';
+SELECT pg_stat_statements_reset();
+
+DO LANGUAGE plpgsql $$
+BEGIN
+ -- this is a SELECT
+ PERFORM 'hello world'::TEXT;
+END;
+$$;
+
+-- PL/pgSQL function
+CREATE FUNCTION PLUS_TWO(i INTEGER) RETURNS INTEGER AS $$
+DECLARE
+ r INTEGER;
+BEGIN
+ SELECT (i + 1 + 1.0)::INTEGER INTO r;
+ RETURN r;
+END; $$ LANGUAGE plpgsql;
+
+SELECT PLUS_TWO(3);
+SELECT PLUS_TWO(7);
+
+-- SQL function --- use LIMIT to keep it from being inlined
+CREATE FUNCTION PLUS_ONE(i INTEGER) RETURNS INTEGER AS
+$$ SELECT (i + 1.0)::INTEGER LIMIT 1 $$ LANGUAGE SQL;
+
+SELECT PLUS_ONE(8);
+SELECT PLUS_ONE(10);
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- pg_stat_statements.track = all
+--
+SET pg_stat_statements.track = 'all';
+SELECT pg_stat_statements_reset();
+
+-- we drop and recreate the functions to avoid any caching funnies
+DROP FUNCTION PLUS_ONE(INTEGER);
+DROP FUNCTION PLUS_TWO(INTEGER);
+
+-- PL/pgSQL function
+CREATE FUNCTION PLUS_TWO(i INTEGER) RETURNS INTEGER AS $$
+DECLARE
+ r INTEGER;
+BEGIN
+ SELECT (i + 1 + 1.0)::INTEGER INTO r;
+ RETURN r;
+END; $$ LANGUAGE plpgsql;
+
+SELECT PLUS_TWO(-1);
+SELECT PLUS_TWO(2);
+
+-- SQL function --- use LIMIT to keep it from being inlined
+CREATE FUNCTION PLUS_ONE(i INTEGER) RETURNS INTEGER AS
+$$ SELECT (i + 1.0)::INTEGER LIMIT 1 $$ LANGUAGE SQL;
+
+SELECT PLUS_ONE(3);
+SELECT PLUS_ONE(1);
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- queries with locking clauses
+--
+CREATE TABLE pgss_a (id integer PRIMARY KEY);
+CREATE TABLE pgss_b (id integer PRIMARY KEY, a_id integer REFERENCES pgss_a);
+
+SELECT pg_stat_statements_reset();
+
+-- control query
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id;
+
+-- test range tables
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_a, pgss_b; -- matches plain "FOR UPDATE"
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE OF pgss_b, pgss_a;
+
+-- test strengths
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR NO KEY UPDATE;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR SHARE;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR KEY SHARE;
+
+-- test wait policies
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE NOWAIT;
+SELECT * FROM pgss_a JOIN pgss_b ON pgss_b.a_id = pgss_a.id FOR UPDATE SKIP LOCKED;
+
+SELECT calls, query FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+DROP TABLE pgss_a, pgss_b CASCADE;
+
+--
+-- utility commands
+--
+SET pg_stat_statements.track_utility = TRUE;
+SELECT pg_stat_statements_reset();
+
+SELECT 1;
+CREATE INDEX test_b ON test(b);
+DROP TABLE test \;
+DROP TABLE IF EXISTS test \;
+DROP FUNCTION PLUS_ONE(INTEGER);
+DROP TABLE IF EXISTS test \;
+DROP TABLE IF EXISTS test \;
+DROP FUNCTION IF EXISTS PLUS_ONE(INTEGER);
+DROP FUNCTION PLUS_TWO(INTEGER);
+
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- Track user activity and reset the matching entries
+--
+SELECT pg_stat_statements_reset();
+CREATE ROLE regress_stats_user1;
+CREATE ROLE regress_stats_user2;
+
+SET ROLE regress_stats_user1;
+
+SELECT 1 AS "ONE";
+SELECT 1+1 AS "TWO";
+
+RESET ROLE;
+SET ROLE regress_stats_user2;
+
+SELECT 1 AS "ONE";
+SELECT 1+1 AS "TWO";
+
+RESET ROLE;
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- Don't reset anything if any parameter is NULL
+--
+SELECT pg_stat_statements_reset(NULL);
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- remove query ('SELECT $1+$2 AS "TWO"') executed by regress_stats_user2
+-- in the current database
+--
+SELECT pg_stat_statements_reset(
+ (SELECT r.oid FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user2'),
+ (SELECT d.oid FROM pg_database AS d WHERE d.datname = current_database()),
+ (SELECT s.queryid FROM pg_stat_statements AS s
+ WHERE s.query = 'SELECT $1+$2 AS "TWO"' LIMIT 1));
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- remove query ('SELECT $1 AS "ONE"') executed by two users
+--
+SELECT pg_stat_statements_reset(0,0,s.queryid)
+ FROM pg_stat_statements AS s WHERE s.query = 'SELECT $1 AS "ONE"';
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- remove query of a user (regress_stats_user1)
+--
+SELECT pg_stat_statements_reset(r.oid)
+ FROM pg_roles AS r WHERE r.rolname = 'regress_stats_user1';
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- reset all
+--
+SELECT pg_stat_statements_reset(0,0,0);
+SELECT query, calls, rows FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- cleanup
+--
+DROP ROLE regress_stats_user1;
+DROP ROLE regress_stats_user2;
+
+--
+-- [re]plan counting
+--
+SELECT pg_stat_statements_reset();
+CREATE TABLE test ();
+PREPARE prep1 AS SELECT COUNT(*) FROM test;
+EXECUTE prep1;
+EXECUTE prep1;
+EXECUTE prep1;
+ALTER TABLE test ADD COLUMN x int;
+EXECUTE prep1;
+SELECT 42;
+SELECT 42;
+SELECT 42;
+SELECT query, plans, calls, rows FROM pg_stat_statements
+ WHERE query NOT LIKE 'PREPARE%' ORDER BY query COLLATE "C";
+-- for the prepared statement we expect at least one replan, but cache
+-- invalidations could force more
+SELECT query, plans >= 2 AND plans <= calls AS plans_ok, calls, rows FROM pg_stat_statements
+ WHERE query LIKE 'PREPARE%' ORDER BY query COLLATE "C";
+
+DROP EXTENSION pg_stat_statements;