Compare commits
157 commits: REL_18_BET... → master

Commits (SHA1 only):
84914e964b, 0f65f3eec4, f7b11414e9, f85f6ab051, dd2ce37927, e1458f2f1b, b774ad4933, 7c319f5491,
361499538c, 137935bd11, 8898082a5d, 7a6880fadc, 78bd364ee3, 4c787a24e7, 3feff3916e, 166b4f4560,
7d4667c620, 73e26cbeb5, 37e5f0b61f, 1a857348e4, 5b40feab59, a31767fc09, 304862973e, e6eed40e44,
016e407f4b, c37be39a74, 54c6ea8c81, e6f98d8848, 04acad82b0, cc733ed164, 4b05ebf095, 112e40b867,
b87163e5f3, f9b1192190, 30c15987d9, 48814415d5, f777d77387, 7f3381c7ee, 58fbfde152, 0e164eb9f4,
73bdcfab35, aa87f69c00, 31a7e175fd, fc32be3c94, 32edf732e8, 5231ed8262, c98975ba85, 4672b62239,
c6f7f11d8f, e5a3c9d9b5, b006bcd531, e050af2868, 706054b11b, 232d8caeaa, d98cefe114, 961553daf5,
c3eda50b06, 03c53a7314, 470273da0f, 3c4d7557e0, c3623703f3, a1de1b0833, 35a428f30b, 089f27cf8a,
bf6034d00d, e5d64fd654, be86ca103a, c861092b0e, 4fbb46f612, d46911e584, 3e782ca322, 08b8aa1748,
34eb2a80d5, 4c08ecd161, c53f3b9cc8, 1f62dbf5f0, 52a1df85f2, 3bcb554fd2, 47d90b741d, 44ce4e1593,
a8f093234d, cbc8fd0c9a, 1ca583f6c0, 02502c1bca, 6aa33afe6d, fb844b9f06, 70a13c528b, 883339c170,
7ddfac79f2, 3b7140d27e, b7ab88ddb1, cb1456423d, d376ab570e, f24605e2dc, 5d6eac80cd, 1722d5eb05,
f3622b6476, 3d0c3a418f, 0bd762e81f, 06450c7b8c, a6060f1cbe, cbf53e2b8a, 54675d8986, acad909321,
29f7ce6fe7, ad5eaf390c, f8db5c7a3f, 2c6469d4cd, 11b2dc3709, 3d3a81fc24, 12eee85e51, 0d4dad200d,
5987553fde, fe29b2a1da, 2c0ed86d39, 6e289f2d5d, 8ede692de5, dbf42b84ac, 0588656366, dc9a2d54fd,
7b2ad43426, d4a7e4e179, 4a4ee0c2c1, 99ddf8615c, c259ba881c, 36e5fda632, 371f2db8b0, 89372d0aaa,
95129709fd, 75d73331d0, 0aaf69965d, bc35adee8d, b28c59a6cd, 965213d9c5, 9d710a1ac0, 8fcc648780,
c06e909c26, 773db22269, 9fef27a83b, f8d49aa130, 69aca072eb, 3bd5271729, 16bf24e0e4, 5f4d98d4f3,
7e25c9363a, acea3fc49f, ab42d643c1, 2448c7a9e0, c0cf282551, c11bd5f500, a2c6d84acd, c4c236ab5c,
09a47c68e2, b560ce7884, ada78f9bef, 575f6003ed, 45750c6cfe
.git-blame-ignore-revs
@@ -14,6 +14,9 @@
 #
 # $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso
 
+4672b6223910687b2aab075bcd2dd54ce90d5171 # 2025-06-01 14:55:24 -0400
+# Run pgindent on the previous commit.
+
 918e7287ed20eb1fe280ab6c4056ccf94dcd53a8 # 2025-04-30 19:18:30 +1200
 # Fix broken indentation
README
@@ -1,5 +1,5 @@
 PostgreSQL Database Management System
-(formerly known as Postgres, then as Postgres95)
+(also known as Postgres, formerly known as Postgres95)
 
 Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
configure (vendored), 15 changes
@@ -15616,7 +15616,7 @@ fi
 LIBS_including_readline="$LIBS"
 LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'`
 
-for ac_func in backtrace_symbols copyfile copy_file_range elf_aux_info getauxval getifaddrs getpeerucred inet_pton kqueue localeconv_l mbstowcs_l memset_s posix_fallocate ppoll pthread_is_threaded_np setproctitle setproctitle_fast strsignal syncfs sync_file_range uselocale wcstombs_l
+for ac_func in backtrace_symbols copyfile copy_file_range elf_aux_info getauxval getifaddrs getpeerucred inet_pton kqueue localeconv_l mbstowcs_l posix_fallocate ppoll pthread_is_threaded_np setproctitle setproctitle_fast strsignal syncfs sync_file_range uselocale wcstombs_l
 do :
   as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
   ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"

@@ -16192,6 +16192,19 @@ cat >>confdefs.h <<_ACEOF
 #define HAVE_DECL_STRCHRNUL $ac_have_decl
 _ACEOF
 
+ac_fn_c_check_decl "$LINENO" "memset_s" "ac_cv_have_decl_memset_s" "#define __STDC_WANT_LIB_EXT1__ 1
+#include <string.h>
+"
+if test "x$ac_cv_have_decl_memset_s" = xyes; then :
+  ac_have_decl=1
+else
+  ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_MEMSET_S $ac_have_decl
+_ACEOF
+
 
 # This is probably only present on macOS, but may as well check always
 ac_fn_c_check_decl "$LINENO" "F_FULLFSYNC" "ac_cv_have_decl_F_FULLFSYNC" "#include <fcntl.h>
configure.ac
@@ -1792,7 +1792,6 @@ AC_CHECK_FUNCS(m4_normalize([
 	kqueue
 	localeconv_l
 	mbstowcs_l
-	memset_s
 	posix_fallocate
 	ppoll
 	pthread_is_threaded_np

@@ -1838,6 +1837,8 @@ AC_CHECK_DECLS([strlcat, strlcpy, strnlen, strsep, timingsafe_bcmp])
 AC_CHECK_DECLS([preadv], [], [], [#include <sys/uio.h>])
 AC_CHECK_DECLS([pwritev], [], [], [#include <sys/uio.h>])
 AC_CHECK_DECLS([strchrnul], [], [], [#include <string.h>])
+AC_CHECK_DECLS([memset_s], [], [], [#define __STDC_WANT_LIB_EXT1__ 1
+#include <string.h>])
 
 # This is probably only present on macOS, but may as well check always
 AC_CHECK_DECLS(F_FULLFSYNC, [], [], [#include <fcntl.h>])
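Note: the memset_s hunks above swap a function-existence probe for a declaration probe. A plausible motivation, stated here as an assumption rather than something taken from this diff, is that C11 Annex K only declares memset_s() when __STDC_WANT_LIB_EXT1__ is defined before <string.h> is included, so the function can link even though its declaration is invisible. A minimal C sketch of code guarded by the HAVE_DECL_MEMSET_S macro that AC_CHECK_DECLS generates (wipe_buffer is an illustrative name):

    /* Illustrative sketch, not part of the diff. */
    #define __STDC_WANT_LIB_EXT1__ 1
    #include <string.h>

    #if defined(HAVE_DECL_MEMSET_S) && HAVE_DECL_MEMSET_S
    static void
    wipe_buffer(void *buf, size_t len)
    {
        /* unlike plain memset, memset_s may not be optimized away */
        (void) memset_s(buf, len, 0, len);
    }
    #endif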
contrib/auto_explain/auto_explain.c
@@ -81,7 +81,7 @@ static ExecutorRun_hook_type prev_ExecutorRun = NULL;
 static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
 static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
 
-static bool explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
+static void explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
 static void explain_ExecutorRun(QueryDesc *queryDesc,
 								ScanDirection direction,
 								uint64 count);

@@ -261,11 +261,9 @@ _PG_init(void)
 /*
  * ExecutorStart hook: start up logging if needed
  */
-static bool
+static void
 explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
-	bool		plan_valid;
-
 	/*
 	 * At the beginning of each top-level statement, decide whether we'll
 	 * sample this statement.  If nested-statement explaining is enabled,

@@ -301,13 +299,9 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 	}
 
 	if (prev_ExecutorStart)
-		plan_valid = prev_ExecutorStart(queryDesc, eflags);
+		prev_ExecutorStart(queryDesc, eflags);
 	else
-		plan_valid = standard_ExecutorStart(queryDesc, eflags);
-
-	/* The plan may have become invalid during standard_ExecutorStart() */
-	if (!plan_valid)
-		return false;
+		standard_ExecutorStart(queryDesc, eflags);
 
 	if (auto_explain_enabled())
 	{

@@ -325,8 +319,6 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 			MemoryContextSwitchTo(oldcxt);
 		}
 	}
-
-	return true;
 }
 
 /*
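Note: the four hunks above track a core API change — the ExecutorStart hook returns void again instead of bool, so hooks no longer report plan validity to their caller. A minimal sketch of an extension written against the void-returning convention shown in this diff (my_executor_start and the module itself are illustrative, modeled on auto_explain):

    #include "postgres.h"
    #include "fmgr.h"
    #include "executor/executor.h"

    PG_MODULE_MAGIC;

    static ExecutorStart_hook_type prev_ExecutorStart = NULL;

    static void
    my_executor_start(QueryDesc *queryDesc, int eflags)
    {
        /* chain to the previous hook, or to the standard implementation */
        if (prev_ExecutorStart)
            prev_ExecutorStart(queryDesc, eflags);
        else
            standard_ExecutorStart(queryDesc, eflags);

        /* extension-specific startup work would go here */
    }

    void
    _PG_init(void)
    {
        prev_ExecutorStart = ExecutorStart_hook;
        ExecutorStart_hook = my_executor_start;
    }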
contrib/btree_gist upgrade script (1.7 → 1.8)
@@ -3,85 +3,85 @@
 -- complain if script is sourced in psql, rather than via CREATE EXTENSION
 \echo Use "ALTER EXTENSION btree_gist UPDATE TO '1.8'" to load this file. \quit
 
-CREATE FUNCTION gist_stratnum_btree(int)
+CREATE FUNCTION gist_translate_cmptype_btree(int)
 RETURNS smallint
 AS 'MODULE_PATHNAME'
 LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT;
 
 ALTER OPERATOR FAMILY gist_oid_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_int2_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_int4_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_int8_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_float4_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_float8_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_timestamp_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_timestamptz_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_time_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_date_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_interval_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_cash_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_macaddr_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_text_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_bpchar_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_bytea_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_numeric_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_bit_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_inet_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_cidr_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_timetz_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_uuid_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_macaddr8_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_enum_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
 
 ALTER OPERATOR FAMILY gist_bool_ops USING gist ADD
-	FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+	FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
contrib/btree_gist/btree_gist.c
@@ -15,7 +15,7 @@ PG_MODULE_MAGIC_EXT(
 PG_FUNCTION_INFO_V1(gbt_decompress);
 PG_FUNCTION_INFO_V1(gbtreekey_in);
 PG_FUNCTION_INFO_V1(gbtreekey_out);
-PG_FUNCTION_INFO_V1(gist_stratnum_btree);
+PG_FUNCTION_INFO_V1(gist_translate_cmptype_btree);
 
 /**************************************************
  * In/Out for keys

@@ -62,7 +62,7 @@ gbt_decompress(PG_FUNCTION_ARGS)
  * Returns the btree number for supported operators, otherwise invalid.
  */
 Datum
-gist_stratnum_btree(PG_FUNCTION_ARGS)
+gist_translate_cmptype_btree(PG_FUNCTION_ARGS)
 {
 	CompareType cmptype = PG_GETARG_INT32(0);
contrib/btree_gist regression test (expected output)
@@ -1,13 +1,13 @@
--- test stratnum support func
-SELECT gist_stratnum_btree(7);
- gist_stratnum_btree
----------------------
-                   0
+-- test stratnum translation support func
+SELECT gist_translate_cmptype_btree(7);
+ gist_translate_cmptype_btree
+------------------------------
+                            0
 (1 row)
 
-SELECT gist_stratnum_btree(3);
- gist_stratnum_btree
----------------------
-                   3
+SELECT gist_translate_cmptype_btree(3);
+ gist_translate_cmptype_btree
+------------------------------
+                            3
 (1 row)
contrib/btree_gist regression test (SQL script)
@@ -1,3 +1,3 @@
--- test stratnum support func
-SELECT gist_stratnum_btree(7);
-SELECT gist_stratnum_btree(3);
+-- test stratnum translation support func
+SELECT gist_translate_cmptype_btree(7);
+SELECT gist_translate_cmptype_btree(3);
contrib/dblink/dblink.c
@@ -105,7 +105,7 @@ static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const
 static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first);
 static remoteConn *getConnectionByName(const char *name);
 static HTAB *createConnHash(void);
-static void createNewConnection(const char *name, remoteConn *rconn);
+static remoteConn *createNewConnection(const char *name);
 static void deleteConnection(const char *name);
 static char **get_pkey_attnames(Relation rel, int16 *indnkeyatts);
 static char **get_text_array_contents(ArrayType *array, int *numitems);

@@ -119,7 +119,8 @@ static Relation get_rel_from_relname(text *relname_text, LOCKMODE lockmode, AclM
 static char *generate_relation_name(Relation rel);
 static void dblink_connstr_check(const char *connstr);
 static bool dblink_connstr_has_pw(const char *connstr);
-static void dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr);
+static void dblink_security_check(PGconn *conn, const char *connname,
+								  const char *connstr);
 static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
 							 bool fail, const char *fmt,...) pg_attribute_printf(5, 6);
 static char *get_connect_string(const char *servername);

@@ -147,16 +148,22 @@ static uint32 dblink_we_get_conn = 0;
 static uint32 dblink_we_get_result = 0;
 
 /*
- * Following is list that holds multiple remote connections.
+ * Following is hash that holds multiple remote connections.
  * Calling convention of each dblink function changes to accept
- * connection name as the first parameter. The connection list is
+ * connection name as the first parameter. The connection hash is
  * much like ecpg e.g. a mapping between a name and a PGconn object.
+ *
+ * To avoid potentially leaking a PGconn object in case of out-of-memory
+ * errors, we first create the hash entry, then open the PGconn.
+ * Hence, a hash entry whose rconn.conn pointer is NULL must be
+ * understood as a leftover from a failed create; it should be ignored
+ * by lookup operations, and silently replaced by create operations.
  */
 
 typedef struct remoteConnHashEnt
 {
 	char		name[NAMEDATALEN];
-	remoteConn *rconn;
+	remoteConn	rconn;
 } remoteConnHashEnt;
 
 /* initial number of connection hashes */

@@ -233,7 +240,7 @@ dblink_get_conn(char *conname_or_str,
 						 errmsg("could not establish connection"),
 						 errdetail_internal("%s", msg)));
 		}
-		dblink_security_check(conn, rconn, connstr);
+		dblink_security_check(conn, NULL, connstr);
 		if (PQclientEncoding(conn) != GetDatabaseEncoding())
 			PQsetClientEncoding(conn, GetDatabaseEncodingName());
 		freeconn = true;

@@ -296,15 +303,6 @@ dblink_connect(PG_FUNCTION_ARGS)
 	else if (PG_NARGS() == 1)
 		conname_or_str = text_to_cstring(PG_GETARG_TEXT_PP(0));
 
-	if (connname)
-	{
-		rconn = (remoteConn *) MemoryContextAlloc(TopMemoryContext,
-												  sizeof(remoteConn));
-		rconn->conn = NULL;
-		rconn->openCursorCount = 0;
-		rconn->newXactForCursor = false;
-	}
-
 	/* first check for valid foreign data server */
 	connstr = get_connect_string(conname_or_str);
 	if (connstr == NULL)

@@ -317,6 +315,13 @@ dblink_connect(PG_FUNCTION_ARGS)
 	if (dblink_we_connect == 0)
 		dblink_we_connect = WaitEventExtensionNew("DblinkConnect");
 
+	/* if we need a hashtable entry, make that first, since it might fail */
+	if (connname)
+	{
+		rconn = createNewConnection(connname);
+		Assert(rconn->conn == NULL);
+	}
+
 	/* OK to make connection */
 	conn = libpqsrv_connect(connstr, dblink_we_connect);
 
@@ -324,8 +329,8 @@ dblink_connect(PG_FUNCTION_ARGS)
 	{
 		msg = pchomp(PQerrorMessage(conn));
 		libpqsrv_disconnect(conn);
-		if (rconn)
-			pfree(rconn);
+		if (connname)
+			deleteConnection(connname);
 
 		ereport(ERROR,
 				(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),

@@ -334,16 +339,16 @@ dblink_connect(PG_FUNCTION_ARGS)
 	}
 
 	/* check password actually used if not superuser */
-	dblink_security_check(conn, rconn, connstr);
+	dblink_security_check(conn, connname, connstr);
 
 	/* attempt to set client encoding to match server encoding, if needed */
 	if (PQclientEncoding(conn) != GetDatabaseEncoding())
 		PQsetClientEncoding(conn, GetDatabaseEncodingName());
 
 	/* all OK, save away the conn */
 	if (connname)
 	{
 		rconn->conn = conn;
-		createNewConnection(connname, rconn);
 	}
 	else
 	{

@@ -383,10 +388,7 @@ dblink_disconnect(PG_FUNCTION_ARGS)
 
 	libpqsrv_disconnect(conn);
 	if (rconn)
-	{
 		deleteConnection(conname);
-		pfree(rconn);
-	}
 	else
 		pconn->conn = NULL;

@@ -1304,6 +1306,9 @@ dblink_get_connections(PG_FUNCTION_ARGS)
 	hash_seq_init(&status, remoteConnHash);
 	while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL)
 	{
+		/* ignore it if it's not an open connection */
+		if (hentry->rconn.conn == NULL)
+			continue;
 		/* stash away current value */
 		astate = accumArrayResult(astate,
 								  CStringGetTextDatum(hentry->name),

@@ -2539,8 +2544,8 @@ getConnectionByName(const char *name)
 	hentry = (remoteConnHashEnt *) hash_search(remoteConnHash,
 											   key, HASH_FIND, NULL);
 
-	if (hentry)
-		return hentry->rconn;
+	if (hentry && hentry->rconn.conn != NULL)
+		return &hentry->rconn;
 
 	return NULL;
 }

@@ -2557,8 +2562,8 @@ createConnHash(void)
 					   HASH_ELEM | HASH_STRINGS);
 }
 
-static void
-createNewConnection(const char *name, remoteConn *rconn)
+static remoteConn *
+createNewConnection(const char *name)
 {
 	remoteConnHashEnt *hentry;
 	bool		found;

@@ -2572,17 +2577,15 @@ createNewConnection(const char *name)
 	hentry = (remoteConnHashEnt *) hash_search(remoteConnHash, key,
 											   HASH_ENTER, &found);
 
-	if (found)
-	{
-		libpqsrv_disconnect(rconn->conn);
-		pfree(rconn);
-
+	if (found && hentry->rconn.conn != NULL)
 		ereport(ERROR,
 				(errcode(ERRCODE_DUPLICATE_OBJECT),
 				 errmsg("duplicate connection name")));
-	}
 
-	hentry->rconn = rconn;
+	/* New, or reusable, so initialize the rconn struct to zeroes */
+	memset(&hentry->rconn, 0, sizeof(remoteConn));
+
+	return &hentry->rconn;
 }

@@ -2671,9 +2674,12 @@ dblink_connstr_has_required_scram_options(const char *connstr)
  * We need to make sure that the connection made used credentials
  * which were provided by the user, so check what credentials were
  * used to connect and then make sure that they came from the user.
+ *
+ * On failure, we close "conn" and also delete the hashtable entry
+ * identified by "connname" (if that's not NULL).
  */
 static void
-dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr)
+dblink_security_check(PGconn *conn, const char *connname, const char *connstr)
 {
 	/* Superuser bypasses security check */
 	if (superuser())

@@ -2703,8 +2709,8 @@ dblink_security_check(PGconn *conn, const char *connname, const char *connstr)
 
 	/* Otherwise, fail out */
 	libpqsrv_disconnect(conn);
-	if (rconn)
-		pfree(rconn);
+	if (connname)
+		deleteConnection(connname);
 
 	ereport(ERROR,
 			(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),

@@ -3218,7 +3224,7 @@ appendSCRAMKeysInfo(StringInfo buf)
 	len = pg_b64_enc_len(sizeof(MyProcPort->scram_ClientKey));
 	/* don't forget the zero-terminator */
 	client_key = palloc0(len + 1);
-	encoded_len = pg_b64_encode((const char *) MyProcPort->scram_ClientKey,
+	encoded_len = pg_b64_encode(MyProcPort->scram_ClientKey,
 								sizeof(MyProcPort->scram_ClientKey),
 								client_key, len);
 	if (encoded_len < 0)

@@ -3227,7 +3233,7 @@ appendSCRAMKeysInfo(StringInfo buf)
 	len = pg_b64_enc_len(sizeof(MyProcPort->scram_ServerKey));
 	/* don't forget the zero-terminator */
 	server_key = palloc0(len + 1);
-	encoded_len = pg_b64_encode((const char *) MyProcPort->scram_ServerKey,
+	encoded_len = pg_b64_encode(MyProcPort->scram_ServerKey,
 								sizeof(MyProcPort->scram_ServerKey),
 								server_key, len);
 	if (encoded_len < 0)
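Note: the dblink hunks above reorder connection creation — the hash entry is made first (createNewConnection), the PGconn opened second — so an out-of-memory failure can never strand an unreferenced PGconn; an entry whose rconn.conn is NULL is a leftover that lookups skip and creates silently reuse. A self-contained toy sketch of that invariant in plain C (not dblink's actual code; the FILE* stands in for a PGconn, and the fixed-size table stands in for the dynahash table):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_CONNS 8
    #define NAMELEN   64

    typedef struct Entry
    {
        char  name[NAMELEN];
        FILE *resource;             /* stands in for a PGconn */
        int   used;
    } Entry;

    static Entry table[MAX_CONNS];

    /* Step 1: reserve (or silently reuse) the entry; failure leaks nothing. */
    static Entry *
    create_entry(const char *name)
    {
        for (int i = 0; i < MAX_CONNS; i++)
        {
            Entry *e = &table[i];

            if (e->used && strcmp(e->name, name) == 0)
            {
                if (e->resource != NULL)
                    return NULL;    /* genuine duplicate: caller errors out */
                return e;           /* leftover from a failed create: reuse */
            }
            if (!e->used)
            {
                e->used = 1;
                snprintf(e->name, NAMELEN, "%s", name);
                e->resource = NULL;
                return e;
            }
        }
        return NULL;                /* table full */
    }

    /* Lookups ignore half-created entries, as in getConnectionByName(). */
    static Entry *
    lookup_entry(const char *name)
    {
        for (int i = 0; i < MAX_CONNS; i++)
            if (table[i].used && strcmp(table[i].name, name) == 0 &&
                table[i].resource != NULL)
                return &table[i];
        return NULL;
    }

    int
    main(void)
    {
        Entry *e = create_entry("conn1");

        if (e != NULL)
            e->resource = tmpfile();    /* step 2: acquire the resource last */
        printf("lookup: %s\n", lookup_entry("conn1") ? "found" : "not found");
        return 0;
    }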
Perl TAP test
@@ -251,4 +251,3 @@ sub setup_table
 }
 
 done_testing();
-
contrib/file_fdw expected output
@@ -48,6 +48,10 @@ SET ROLE regress_file_fdw_superuser;
CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;
-- validator tests
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (foo 'bar');           -- ERROR
ERROR:  invalid option "foo"
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS ("a=b" 'true');        -- ERROR
ERROR:  invalid option name "a=b": must not contain "="
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml');        -- ERROR
ERROR:  COPY format "xml" not recognized
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
contrib/file_fdw SQL test
@@ -55,6 +55,8 @@ CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;

-- validator tests
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (foo 'bar');           -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS ("a=b" 'true');        -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml');        -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape ':'); -- ERROR
contrib/pg_overexplain expected output
@@ -37,7 +37,7 @@ EXPLAIN (DEBUG) SELECT 1;
 Subplans Needing Rewind: none
 Relation OIDs: none
 Executor Parameter Types: none
-Parse Location: 16 for 8 bytes
+Parse Location: 0 to end
 (11 rows)
 
 EXPLAIN (RANGE_TABLE) SELECT 1;

@@ -436,7 +436,7 @@ $$);
 Subplans Needing Rewind: none
 Relation OIDs: NNN...
 Executor Parameter Types: 23
-Parse Location: 75 for 62 bytes
+Parse Location: 0 to end
 (47 rows)
 
 RESET enable_hashjoin;
contrib/pg_prewarm/Makefile
@@ -10,6 +10,8 @@ EXTENSION = pg_prewarm
 DATA = pg_prewarm--1.1--1.2.sql pg_prewarm--1.1.sql pg_prewarm--1.0--1.1.sql
 PGFILEDESC = "pg_prewarm - preload relation data into system buffer cache"
 
+REGRESS = pg_prewarm
+
 TAP_TESTS = 1
 
 ifdef USE_PGXS
contrib/pg_prewarm/autoprewarm.c
@@ -693,8 +693,15 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
 		return 0;
 	}
 
-	block_info_array =
-		(BlockInfoRecord *) palloc(sizeof(BlockInfoRecord) * NBuffers);
+	/*
+	 * With sufficiently large shared_buffers, allocation will exceed 1GB, so
+	 * allow for a huge allocation to prevent outright failure.
+	 *
+	 * (In the future, it might be a good idea to redesign this to use a more
+	 * memory-efficient data structure.)
+	 */
+	block_info_array = (BlockInfoRecord *)
+		palloc_extended((sizeof(BlockInfoRecord) * NBuffers), MCXT_ALLOC_HUGE);
 
 	for (num_blocks = 0, i = 0; i < NBuffers; i++)
 	{
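Note: the hunk above exists because plain palloc() rejects single requests of MaxAllocSize (just under 1GB) or more, so once NBuffers * sizeof(BlockInfoRecord) crosses that line the dump would fail outright; passing MCXT_ALLOC_HUGE to palloc_extended() lifts the cap. A minimal sketch of the pattern (the helper name is illustrative, not from the diff):

    #include "postgres.h"
    #include "utils/memutils.h"

    extern PGDLLIMPORT int NBuffers;    /* shared_buffers, in buffers */

    static void *
    allocate_block_info(Size record_size)
    {
        Size    total = record_size * (Size) NBuffers;

        /* palloc(total) would ereport once total >= MaxAllocSize (~1GB) */
        return palloc_extended(total, MCXT_ALLOC_HUGE);
    }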
contrib/pg_prewarm/expected/pg_prewarm.out (new file, 10 lines)
@@ -0,0 +1,10 @@
+-- Test pg_prewarm extension
+CREATE EXTENSION pg_prewarm;
+-- pg_prewarm() should fail if the target relation has no storage.
+CREATE TABLE test (c1 int) PARTITION BY RANGE (c1);
+SELECT pg_prewarm('test', 'buffer');
+ERROR:  relation "test" does not have storage
+DETAIL:  This operation is not supported for partitioned tables.
+-- Cleanup
+DROP TABLE test;
+DROP EXTENSION pg_prewarm;
contrib/pg_prewarm/meson.build
@@ -29,6 +29,11 @@ tests += {
   'name': 'pg_prewarm',
   'sd': meson.current_source_dir(),
   'bd': meson.current_build_dir(),
+  'regress': {
+    'sql': [
+      'pg_prewarm',
+    ],
+  },
   'tap': {
     'tests': [
       't/001_basic.pl',
contrib/pg_prewarm/pg_prewarm.c
@@ -112,6 +112,14 @@ pg_prewarm(PG_FUNCTION_ARGS)
 	if (aclresult != ACLCHECK_OK)
 		aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), get_rel_name(relOid));
 
+	/* Check that the relation has storage. */
+	if (!RELKIND_HAS_STORAGE(rel->rd_rel->relkind))
+		ereport(ERROR,
+				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+				 errmsg("relation \"%s\" does not have storage",
+						RelationGetRelationName(rel)),
+				 errdetail_relkind_not_supported(rel->rd_rel->relkind)));
+
 	/* Check that the fork exists. */
 	if (!smgrexists(RelationGetSmgr(rel), forkNumber))
 		ereport(ERROR,
contrib/pg_prewarm/sql/pg_prewarm.sql (new file, 10 lines)
@@ -0,0 +1,10 @@
+-- Test pg_prewarm extension
+CREATE EXTENSION pg_prewarm;
+
+-- pg_prewarm() should fail if the target relation has no storage.
+CREATE TABLE test (c1 int) PARTITION BY RANGE (c1);
+SELECT pg_prewarm('test', 'buffer');
+
+-- Cleanup
+DROP TABLE test;
+DROP EXTENSION pg_prewarm;
contrib/pg_stat_statements expected output (extended tests)
@@ -68,3 +68,61 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
 1 | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(4 rows)

-- Various parameter numbering patterns
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 t
---
 t
(1 row)

-- Unique query IDs with parameter numbers switched.
SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g
--
(0 rows)

SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g
--
(0 rows)

SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g
--
(0 rows)

SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g
--
(0 rows)

SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g
--
(0 rows)

-- Two groups of two queries with the same query ID.
SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
--
(1 row)

SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
--
(0 rows)

SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g
--
(0 rows)

SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g
--
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
 query | calls
--------------------------------------------------------------+-------
 SELECT WHERE $1::int IN ($2::int, $3::int)                   | 1
 SELECT WHERE $2::int IN ($1::int, $3::int)                   | 2
 SELECT WHERE $2::int IN ($1::int, $3::int)                   | 2
 SELECT WHERE $2::int IN ($3::int, $1::int)                   | 1
 SELECT WHERE $3::int IN ($1::int, $2::int)                   | 1
 SELECT WHERE ($1::int, $4) IN (($5, $2::int), ($3::int, $6)) | 1
 SELECT WHERE ($2::int, $4) IN (($5, $3::int), ($1::int, $6)) | 1
 SELECT pg_stat_statements_reset() IS NOT NULL AS t           | 1
(8 rows)
contrib/pg_stat_statements expected output (level tracking)
@@ -206,37 +206,37 @@ EXPLAIN (COSTS OFF) SELECT 1 UNION SELECT 2;

SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+--------------------------------------------------------------------
 f | 1 | DELETE FROM stats_track_tab
 toplevel | calls | query
----------+-------+---------------------------------------------------------------------
 t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2)
 f | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2);
 t | 1 | EXPLAIN (COSTS OFF) (TABLE test_table)
 f | 1 | EXPLAIN (COSTS OFF) (TABLE test_table);
 t | 1 | EXPLAIN (COSTS OFF) (VALUES ($1, $2))
 f | 1 | EXPLAIN (COSTS OFF) (VALUES ($1, $2));
 t | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab
 f | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab;
 t | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1))
 t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab                 +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
 f | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1));
 t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab                 +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
 f | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab                 +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id);
 t | 1 | EXPLAIN (COSTS OFF) SELECT $1
 t | 1 | EXPLAIN (COSTS OFF) SELECT $1 UNION SELECT $2
 f | 1 | EXPLAIN (COSTS OFF) SELECT $1 UNION SELECT $2;
 f | 1 | EXPLAIN (COSTS OFF) SELECT $1;
 t | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab
 f | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab;
 t | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2
 f | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2;
 t | 1 | EXPLAIN (COSTS OFF) VALUES ($1)
 f | 1 | INSERT INTO stats_track_tab VALUES (($1))
 f | 1 | MERGE INTO stats_track_tab                                     +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
 f | 1 | SELECT $1
 f | 1 | SELECT $1 UNION SELECT $2
 f | 1 | SELECT $1, $2
 f | 1 | EXPLAIN (COSTS OFF) VALUES ($1);
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
 f | 1 | TABLE stats_track_tab
 f | 1 | TABLE test_table
 f | 1 | UPDATE stats_track_tab SET x = $1 WHERE x = $2
 f | 1 | VALUES ($1)
 f | 1 | VALUES ($1, $2)
(23 rows)

-- EXPLAIN - top-level tracking.
@@ -405,20 +405,20 @@ EXPLAIN (COSTS OFF) SELECT 1, 2 UNION SELECT 3, 4\; EXPLAIN (COSTS OFF) (SELECT

SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+-----------------------------------------------------------------
 f | 1 | (SELECT $1, $2, $3) UNION SELECT $4, $5, $6
 toplevel | calls | query
----------+-------+---------------------------------------------------------------------------------------------------------------------
 t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3)
 t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3) UNION SELECT $4, $5, $6
 f | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3); EXPLAIN (COSTS OFF) (SELECT 1, 2, 3, 4);
 t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3, $4)
 f | 1 | EXPLAIN (COSTS OFF) (SELECT 1, 2, 3); EXPLAIN (COSTS OFF) (SELECT $1, $2, $3, $4);
 t | 1 | EXPLAIN (COSTS OFF) SELECT $1
 t | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2
 t | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2 UNION SELECT $3, $4
 f | 1 | SELECT $1
 f | 1 | SELECT $1, $2
 f | 1 | SELECT $1, $2 UNION SELECT $3, $4
 f | 1 | SELECT $1, $2, $3
 f | 1 | SELECT $1, $2, $3, $4
 f | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2 UNION SELECT $3, $4; EXPLAIN (COSTS OFF) (SELECT 1, 2, 3) UNION SELECT 3, 4, 5;
 f | 1 | EXPLAIN (COSTS OFF) SELECT $1; EXPLAIN (COSTS OFF) SELECT 1, 2;
 f | 1 | EXPLAIN (COSTS OFF) SELECT 1, 2 UNION SELECT 3, 4; EXPLAIN (COSTS OFF) (SELECT $1, $2, $3) UNION SELECT $4, $5, $6;
 f | 1 | EXPLAIN (COSTS OFF) SELECT 1; EXPLAIN (COSTS OFF) SELECT $1, $2;
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(13 rows)
@@ -494,29 +494,29 @@ EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ((1))\; EXPLAIN (COSTS OF

SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+--------------------------------------------------------------------
 f | 1 | DELETE FROM stats_track_tab
 f | 1 | DELETE FROM stats_track_tab WHERE x = $1
 toplevel | calls | query
----------+-------+----------------------------------------------------------------------------------------------------------------------------------
 t | 1 | EXPLAIN (COSTS OFF) (TABLE test_table)
 t | 1 | EXPLAIN (COSTS OFF) (VALUES ($1, $2))
 t | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab
 t | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab WHERE x = $1
 f | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab; EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab WHERE x = $1;
 f | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab; EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab WHERE x = 1;
 t | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ($1), ($2)
 t | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1))
 f | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1)); EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (1), (2);
 f | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ((1)); EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ($1), ($2);
 t | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab
 f | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab; EXPLAIN (COSTS OFF) (TABLE test_table);
 f | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab; EXPLAIN (COSTS OFF) (TABLE test_table);
 t | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1
 t | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2
 f | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2; EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = 1;
 f | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = 1 WHERE x = 1; EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1;
 t | 1 | EXPLAIN (COSTS OFF) VALUES ($1)
 f | 1 | INSERT INTO stats_track_tab VALUES ($1), ($2)
 f | 1 | INSERT INTO stats_track_tab VALUES (($1))
 f | 1 | EXPLAIN (COSTS OFF) VALUES ($1); EXPLAIN (COSTS OFF) (VALUES (1, 2));
 f | 1 | EXPLAIN (COSTS OFF) VALUES (1); EXPLAIN (COSTS OFF) (VALUES ($1, $2));
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
 f | 1 | TABLE stats_track_tab
 f | 1 | TABLE test_table
 f | 1 | UPDATE stats_track_tab SET x = $1
 f | 1 | UPDATE stats_track_tab SET x = $1 WHERE x = $2
 f | 1 | VALUES ($1)
 f | 1 | VALUES ($1, $2)
(21 rows)

SELECT pg_stat_statements_reset() IS NOT NULL AS t;
@@ -547,18 +547,21 @@ EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab

SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+---------------------------------------------------------------
 t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab                +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id+
   |   |   WHEN MATCHED THEN UPDATE SET x = id                        +
 toplevel | calls | query
----------+-------+------------------------------------------------------------------------------------------------
 t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab                 +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
 f | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab                 +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id); EXPLAIN (COSTS OFF) SELECT 1, 2, 3, 4, 5;
 f | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab                 +
   |   |   USING (SELECT id FROM generate_series(1, 10) id) ON x = id  +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id); EXPLAIN (COSTS OFF) SELECT $1, $2, $3, $4, $5;
 t | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2, $3, $4, $5
 f | 1 | MERGE INTO stats_track_tab                                     +
   |   |   USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
 f | 1 | SELECT $1, $2, $3, $4, $5
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(5 rows)
@@ -786,29 +789,29 @@ EXPLAIN (COSTS OFF) WITH a AS (select 4) SELECT 1 UNION SELECT 2;

SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+------------------------------------------------------------------------------------------
 toplevel | calls | query
----------+-------+-------------------------------------------------------------------------------------------
 t | 1 | EXPLAIN (COSTS OFF) (WITH a AS (SELECT $1) (SELECT $2, $3))
 f | 1 | EXPLAIN (COSTS OFF) (WITH a AS (SELECT $1) (SELECT $2, $3));
 t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) DELETE FROM stats_track_tab
 f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) DELETE FROM stats_track_tab;
 t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) INSERT INTO stats_track_tab VALUES (($2))
 t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) MERGE INTO stats_track_tab +
   |   |   USING (SELECT id FROM generate_series($2, $3) id) ON x = id        +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                                +
 f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) INSERT INTO stats_track_tab VALUES (($2));
 t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) MERGE INTO stats_track_tab +
   |   |   USING (SELECT id FROM generate_series($2, $3) id) ON x = id        +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                                +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
 f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) MERGE INTO stats_track_tab +
   |   |   USING (SELECT id FROM generate_series($2, $3) id) ON x = id        +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                                +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id);
 t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) SELECT $2
 f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) SELECT $2;
 t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) UPDATE stats_track_tab SET x = $2 WHERE x = $3
 f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) UPDATE stats_track_tab SET x = $2 WHERE x = $3;
 t | 1 | EXPLAIN (COSTS OFF) WITH a AS (select $1) SELECT $2 UNION SELECT $3
 f | 1 | EXPLAIN (COSTS OFF) WITH a AS (select $1) SELECT $2 UNION SELECT $3;
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
 f | 1 | WITH a AS (SELECT $1) (SELECT $2, $3)
 f | 1 | WITH a AS (SELECT $1) DELETE FROM stats_track_tab
 f | 1 | WITH a AS (SELECT $1) INSERT INTO stats_track_tab VALUES (($2))
 f | 1 | WITH a AS (SELECT $1) MERGE INTO stats_track_tab               +
   |   |   USING (SELECT id FROM generate_series($2, $3) id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                         +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
 f | 1 | WITH a AS (SELECT $1) SELECT $2
 f | 1 | WITH a AS (SELECT $1) UPDATE stats_track_tab SET x = $2 WHERE x = $3
 f | 1 | WITH a AS (select $1) SELECT $2 UNION SELECT $3
(15 rows)

-- EXPLAIN with CTEs - top-level tracking
@@ -918,13 +921,14 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)

SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+------------------------------------------------------------------------------
 t | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)           +
 toplevel | calls | query
----------+-------+-------------------------------------------------------------------------------
 t | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)            +
   |   |   DECLARE foocur CURSOR FOR SELECT * FROM stats_track_tab
 f | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)            +
   |   |   DECLARE foocur CURSOR FOR SELECT * FROM stats_track_tab;
 t | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT $1
 f | 1 | SELECT $1
 f | 1 | SELECT * FROM stats_track_tab
 f | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT $1;
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(5 rows)
@@ -1047,10 +1051,10 @@ SELECT toplevel, calls, query FROM pg_stat_statements
 toplevel | calls | query
----------+-------+-----------------------------------------------------------------
 t | 1 | CREATE TEMPORARY TABLE pgss_ctas_1 AS SELECT $1
 f | 1 | CREATE TEMPORARY TABLE pgss_ctas_1 AS SELECT $1;
 t | 1 | CREATE TEMPORARY TABLE pgss_ctas_2 AS EXECUTE test_prepare_pgss
 f | 1 | SELECT $1
 f | 1 | PREPARE test_prepare_pgss AS select generate_series($1, $2)
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
 f | 1 | select generate_series($1, $2)
(5 rows)

-- CREATE TABLE AS, top-level tracking.
@@ -1088,10 +1092,10 @@ EXPLAIN (COSTS OFF) CREATE TEMPORARY TABLE pgss_explain_ctas AS SELECT 1;

SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+---------------------------------------------------------------------------
 toplevel | calls | query
----------+-------+----------------------------------------------------------------------------
 t | 1 | EXPLAIN (COSTS OFF) CREATE TEMPORARY TABLE pgss_explain_ctas AS SELECT $1
 f | 1 | SELECT $1
 f | 1 | EXPLAIN (COSTS OFF) CREATE TEMPORARY TABLE pgss_explain_ctas AS SELECT $1;
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(3 rows)
@@ -1136,14 +1140,14 @@ CLOSE foocur;
COMMIT;
SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+---------------------------------------------------------
 toplevel | calls | query
----------+-------+----------------------------------------------------------
 t | 1 | BEGIN
 t | 1 | CLOSE foocur
 t | 1 | COMMIT
 t | 1 | DECLARE FOOCUR CURSOR FOR SELECT * from stats_track_tab
 f | 1 | DECLARE FOOCUR CURSOR FOR SELECT * from stats_track_tab;
 t | 1 | FETCH FORWARD 1 FROM foocur
 f | 1 | SELECT * from stats_track_tab
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(7 rows)
@@ -1203,25 +1207,25 @@ COPY (DELETE FROM stats_track_tab WHERE x = 2 RETURNING x) TO stdout;
2
SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
 toplevel | calls | query
----------+-------+---------------------------------------------------------------------------
 toplevel | calls | query
----------+-------+-----------------------------------------------------------------------------
 f | 1 | COPY (DELETE FROM stats_track_tab WHERE x = $1 RETURNING x) TO stdout
 t | 1 | COPY (DELETE FROM stats_track_tab WHERE x = 2 RETURNING x) TO stdout
 f | 1 | COPY (INSERT INTO stats_track_tab (x) VALUES ($1) RETURNING x) TO stdout
 t | 1 | COPY (INSERT INTO stats_track_tab (x) VALUES (1) RETURNING x) TO stdout
 t | 1 | COPY (MERGE INTO stats_track_tab USING (SELECT 1 id) ON x = id  +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                           +
 f | 1 | COPY (MERGE INTO stats_track_tab USING (SELECT $1 id) ON x = id +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                           +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id) RETURNING x) TO stdout
 t | 1 | COPY (MERGE INTO stats_track_tab USING (SELECT 1 id) ON x = id  +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                           +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id) RETURNING x) TO stdout
 f | 1 | COPY (SELECT $1 UNION SELECT $2) TO stdout
 f | 1 | COPY (SELECT $1) TO stdout
 t | 1 | COPY (SELECT 1 UNION SELECT 2) TO stdout
 t | 1 | COPY (SELECT 1) TO stdout
 f | 1 | COPY (UPDATE stats_track_tab SET x = $1 WHERE x = $2 RETURNING x) TO stdout
 t | 1 | COPY (UPDATE stats_track_tab SET x = 2 WHERE x = 1 RETURNING x) TO stdout
 f | 1 | DELETE FROM stats_track_tab WHERE x = $1 RETURNING x
 f | 1 | INSERT INTO stats_track_tab (x) VALUES ($1) RETURNING x
 f | 1 | MERGE INTO stats_track_tab USING (SELECT $1 id) ON x = id       +
   |   |   WHEN MATCHED THEN UPDATE SET x = id                           +
   |   |   WHEN NOT MATCHED THEN INSERT (x) VALUES (id) RETURNING x
 f | 1 | SELECT $1
 f | 1 | SELECT $1 UNION SELECT $2
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
 f | 1 | UPDATE stats_track_tab SET x = $1 WHERE x = $2 RETURNING x
(13 rows)

-- COPY - top-level tracking.
@@ -1319,6 +1323,57 @@ SELECT toplevel, calls, query FROM pg_stat_statements
 t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(4 rows)

-- DO block --- multiple inner queries with separators
SET pg_stat_statements.track = 'all';
SET pg_stat_statements.track_utility = TRUE;
CREATE TABLE pgss_do_util_tab_1 (a int);
CREATE TABLE pgss_do_util_tab_2 (a int);
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 t
---
 t
(1 row)

DO $$
DECLARE BEGIN
  EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table';
  EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2';
END $$;
SELECT toplevel, calls, rows, query FROM pg_stat_statements
  WHERE toplevel IS FALSE
  ORDER BY query COLLATE "C";
 toplevel | calls | rows | query
----------+-------+------+-------------------------------------
 f | 1 | 0 | CREATE TABLE pgss_do_table (id INT)
 f | 1 | 0 | DROP TABLE pgss_do_table
 f | 1 | 0 | SELECT a FROM pgss_do_util_tab_1
 f | 1 | 0 | SELECT a FROM pgss_do_util_tab_2
(4 rows)

SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 t
---
 t
(1 row)

-- Note the extra semicolon at the end of the query.
DO $$
DECLARE BEGIN
  EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table;';
  EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2;';
END $$;
SELECT toplevel, calls, rows, query FROM pg_stat_statements
  WHERE toplevel IS FALSE
  ORDER BY query COLLATE "C";
 toplevel | calls | rows | query
----------+-------+------+-------------------------------------
 f | 1 | 0 | CREATE TABLE pgss_do_table (id INT)
 f | 1 | 0 | DROP TABLE pgss_do_table
 f | 1 | 0 | SELECT a FROM pgss_do_util_tab_1
 f | 1 | 0 | SELECT a FROM pgss_do_util_tab_2
(4 rows)

DROP TABLE pgss_do_util_tab_1, pgss_do_util_tab_2;
-- PL/pgSQL function - top-level tracking.
SET pg_stat_statements.track = 'top';
SET pg_stat_statements.track_utility = FALSE;
contrib/pg_stat_statements expected output (planning)
@@ -58,7 +58,7 @@ SELECT 42;
 (1 row)
 
 SELECT plans, calls, rows, query FROM pg_stat_statements
-  WHERE query NOT LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
+  WHERE query NOT LIKE 'PREPARE%' ORDER BY query COLLATE "C";
  plans | calls | rows | query
 -------+-------+------+----------------------------------------------------------
      0 |     1 |    0 | ALTER TABLE stats_plan_test ADD COLUMN x int

@@ -72,10 +72,10 @@ SELECT plans, calls, rows, query FROM pg_stat_statements
 -- for the prepared statement we expect at least one replan, but cache
 -- invalidations could force more
 SELECT plans >= 2 AND plans <= calls AS plans_ok, calls, rows, query FROM pg_stat_statements
-  WHERE query LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
- plans_ok | calls | rows | query
-----------+-------+------+--------------------------------------
- t        |     4 |    4 | SELECT COUNT(*) FROM stats_plan_test
+  WHERE query LIKE 'PREPARE%' ORDER BY query COLLATE "C";
+ plans_ok | calls | rows | query
+----------+-------+------+-------------------------------------------------------
+ t        |     4 |    4 | PREPARE prep1 AS SELECT COUNT(*) FROM stats_plan_test
 (1 row)
 
 -- Cleanup
contrib/pg_stat_statements expected output (main tests)
@@ -208,6 +208,7 @@ DEALLOCATE pgss_test;
 SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
  calls | rows | query
 -------+------+------------------------------------------------------------------------------
+     1 |    1 | PREPARE pgss_test (int) AS SELECT $1, $2 LIMIT $3
      4 |    4 | SELECT $1                                                                    +
        |      |   -- but this one will appear                                                +
        |      |   AS "text"

@@ -221,7 +222,6 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
      2 |    2 | SELECT $1 AS "int" ORDER BY 1
      1 |    2 | SELECT $1 AS i UNION SELECT $2 ORDER BY i
      1 |    1 | SELECT $1 || $2
-     1 |    1 | SELECT $1, $2 LIMIT $3
      2 |    2 | SELECT DISTINCT $1 AS "int"
      0 |    0 | SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C"
      1 |    1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
@@ -238,6 +238,65 @@ SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 t
(1 row)

-- normalization of constants and parameters, with constant locations
-- recorded one or more times.
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 t
---
 t
(1 row)

SELECT WHERE '1' IN ('1'::int, '3'::int::text);
--
(1 row)

SELECT WHERE (1, 2) IN ((1, 2), (2, 3));
--
(1 row)

SELECT WHERE (3, 4) IN ((5, 6), (8, 7));
--
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
 query | calls
------------------------------------------------------------------------+-------
 SELECT WHERE $1 IN ($2::int, $3::int::text)                            | 1
 SELECT WHERE ($1, $2) IN (($3, $4), ($5, $6))                          | 2
 SELECT pg_stat_statements_reset() IS NOT NULL AS t                     | 1
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C" | 0
(4 rows)

-- with the last element being an explicit function call with an argument, ensure
-- the normalization of the squashing interval is correct.
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 t
---
 t
(1 row)

SELECT pg_stat_statements_reset() IS NOT NULL AS t;
 t
---
 t
(1 row)

SELECT WHERE 1 IN (1, int4(1), int4(2));
--
(1 row)

SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2)]);
--
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
 query | calls
------------------------------------------------------------------------+-------
 SELECT WHERE $1 IN ($2 /*, ... */)                                     | 2
 SELECT pg_stat_statements_reset() IS NOT NULL AS t                     | 1
 SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C" | 0
(3 rows)

--
-- queries with locking clauses
--
@ -2,9 +2,11 @@
-- Const squashing functionality
--
CREATE EXTENSION pg_stat_statements;
--
-- Simple Lists
--
CREATE TABLE test_squash (id int, data int);
-- IN queries
-- Normal scenario, too many simple constants for an IN query
-- single element will not be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@ -16,42 +18,150 @@ SELECT * FROM test_squash WHERE id IN (1);
----+------
(0 rows)

SELECT ARRAY[1];
array
-------
{1}
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN ($1) | 1
SELECT ARRAY[$1] | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

-- more than 1 element in a list will be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT * FROM test_squash WHERE id IN (1, 2, 3);
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4);
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5);
id | data
----+------
(0 rows)

SELECT ARRAY[1, 2, 3];
array
---------
{1,2,3}
(1 row)

SELECT ARRAY[1, 2, 3, 4];
array
-----------
{1,2,3,4}
(1 row)

SELECT ARRAY[1, 2, 3, 4, 5];
array
-------------
{1,2,3,4,5}
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 1
SELECT * FROM test_squash WHERE id IN ($1) | 1
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 3
SELECT ARRAY[$1 /*, ... */] | 3
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9);
-- built-in functions will be squashed
-- the IN and ARRAY forms of this statement will have the same queryId
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT WHERE 1 IN (1, int4(1), int4(2), 2);
--
(1 row)

SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2), 2]);
--
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT WHERE $1 IN ($2 /*, ... */) | 2
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- external parameters will not be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) \bind 1 2 3 4 5
;
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) \bind 1 2 3 4 5
;
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
------------------------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 4
SELECT * FROM test_squash WHERE id IN ($1) | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C" | 1
(4 rows)
query | calls
---------------------------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) | 1
SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

-- neither are prepared statements
-- the IN and ARRAY forms of this statement will have the same queryId
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

PREPARE p1(int, int, int, int, int) AS
SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5);
EXECUTE p1(1, 2, 3, 4, 5);
id | data
----+------
(0 rows)

DEALLOCATE p1;
PREPARE p1(int, int, int, int, int) AS
SELECT * FROM test_squash WHERE id = ANY(ARRAY[$1, $2, $3, $4, $5]);
EXECUTE p1(1, 2, 3, 4, 5);
id | data
----+------
(0 rows)

DEALLOCATE p1;
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
------------------------------------------------------------+-------
DEALLOCATE $1 | 2
PREPARE p1(int, int, int, int, int) AS +| 2
SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

-- More conditions in the query
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
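Both runs above bind their values through the extended query protocol, so the constants arrive as extern parameters rather than literals in the query text, and squashing only applies to inline constants. A small sketch of the distinction (psql 16+ provides \bind; the contrast is inferred from the expected output above):

-- extern parameters: kept as $1..$5, not squashed
SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) \bind 1 2 3 4 5 \g
-- inline constants: collapsed to a single placeholder with /*, ... */
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5);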
@ -75,10 +185,25 @@ SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11) AND da
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]) AND data = 2;
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) AND data = 2;
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) AND data = 2;
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
---------------------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) AND data = $2 | 3
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) AND data = $2 | 6
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

@ -107,24 +232,46 @@ SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9])
AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]);
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */)+| 3
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */)+| 6
AND data IN ($2 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- No constants simplification for OpExpr
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

-- In the following two queries the operator expressions (+) and (@) have
-- different oppno, and will be given different query_id if squashed, even though
-- the normalized query will be the same
-- No constants squashing for OpExpr
-- The IN and ARRAY forms of this statement will have the same queryId
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT * FROM test_squash WHERE id IN
(1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9);
id | data
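The two operator spellings jumble differently because each OpExpr carries the pg_operator OID of its operator, and int4 + and the prefix @ resolve to distinct catalog rows. A way to see the OIDs involved, written purely for illustration:

SELECT oid, oprname, oprleft::regtype, oprright::regtype
FROM pg_operator
WHERE oprname IN ('+', '@') AND oprright = 'int4'::regtype;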
@ -137,19 +284,35 @@ SELECT * FROM test_squash WHERE id IN
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY(ARRAY
[1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9]);
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY(ARRAY
[@ '-1', @ '-2', @ '-3', @ '-4', @ '-5', @ '-6', @ '-7', @ '-8', @ '-9']);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN +| 1
SELECT * FROM test_squash WHERE id IN +| 2
($1 + $2, $3 + $4, $5 + $6, $7 + $8, $9 + $10, $11 + $12, $13 + $14, $15 + $16, $17 + $18) |
SELECT * FROM test_squash WHERE id IN +| 1
SELECT * FROM test_squash WHERE id IN +| 2
(@ $1, @ $2, @ $3, @ $4, @ $5, @ $6, @ $7, @ $8, @ $9) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

--
-- FuncExpr
--
-- Verify multiple type representation end up with the same query_id
CREATE TABLE test_float (data float);
-- The casted ARRAY expressions will have the same queryId as the IN clause
-- form of the query
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@ -181,12 +344,38 @@ SELECT data FROM test_float WHERE data IN (1.0, 1.0);
------
(0 rows)

SELECT data FROM test_float WHERE data = ANY(ARRAY['1'::double precision, '2'::double precision]);
data
------
(0 rows)

SELECT data FROM test_float WHERE data = ANY(ARRAY[1.0::double precision, 1.0::double precision]);
data
------
(0 rows)

SELECT data FROM test_float WHERE data = ANY(ARRAY[1, 2]);
data
------
(0 rows)

SELECT data FROM test_float WHERE data = ANY(ARRAY[1, '2']);
data
------
(0 rows)

SELECT data FROM test_float WHERE data = ANY(ARRAY['1', 2]);
data
------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-----------------------------------------------------------+-------
SELECT data FROM test_float WHERE data IN ($1 /*, ... */) | 5
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
query | calls
--------------------------------------------------------------------+-------
SELECT data FROM test_float WHERE data = ANY(ARRAY[$1 /*, ... */]) | 3
SELECT data FROM test_float WHERE data IN ($1 /*, ... */) | 7
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

-- Numeric type, implicit cast is squashed
CREATE TABLE test_squash_numeric (id int, data numeric(5, 2));
@ -201,12 +390,18 @@ SELECT * FROM test_squash_numeric WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
----+------
(0 rows)

SELECT * FROM test_squash_numeric WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-----------------------------------------------------------------+-------
SELECT * FROM test_squash_numeric WHERE data IN ($1 /*, ... */) | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
query | calls
--------------------------------------------------------------------------+-------
SELECT * FROM test_squash_numeric WHERE data = ANY(ARRAY[$1 /*, ... */]) | 1
SELECT * FROM test_squash_numeric WHERE data IN ($1 /*, ... */) | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

-- Bigint, implicit cast is squashed
CREATE TABLE test_squash_bigint (id int, data bigint);
@ -221,14 +416,20 @@ SELECT * FROM test_squash_bigint WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------------------+-------
SELECT * FROM test_squash_bigint WHERE data IN ($1 /*, ... */) | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
id | data
----+------
(0 rows)

-- Bigint, explicit cast is not squashed
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------------------------+-------
SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[$1 /*, ... */]) | 1
SELECT * FROM test_squash_bigint WHERE data IN ($1 /*, ... */) | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)

-- Bigint, explicit cast is squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@ -242,15 +443,22 @@ SELECT * FROM test_squash_bigint WHERE data IN
----+------
(0 rows)

SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[
1::bigint, 2::bigint, 3::bigint, 4::bigint, 5::bigint, 6::bigint,
7::bigint, 8::bigint, 9::bigint, 10::bigint, 11::bigint]);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT * FROM test_squash_bigint WHERE data IN +| 1
($1 /*, ... */::bigint) |
SELECT * FROM test_squash_bigint WHERE data IN +| 2
($1 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- Bigint, long tokens with parenthesis
-- Bigint, long tokens with parenthesis, will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@ -264,44 +472,47 @@ SELECT * FROM test_squash_bigint WHERE id IN
----+------
(0 rows)

SELECT * FROM test_squash_bigint WHERE id = ANY(ARRAY[
abs(100), abs(200), abs(300), abs(400), abs(500), abs(600), abs(700),
abs(800), abs(900), abs(1000), ((abs(1100)))]);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------------------------+-------
SELECT * FROM test_squash_bigint WHERE id IN +| 1
SELECT * FROM test_squash_bigint WHERE id IN +| 2
(abs($1), abs($2), abs($3), abs($4), abs($5), abs($6), abs($7),+|
abs($8), abs($9), abs($10), ((abs($11)))) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- CoerceViaIO, SubLink instead of a Const
CREATE TABLE test_squash_jsonb (id int, data jsonb);
-- Multiple FuncExpr's. Will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT * FROM test_squash_jsonb WHERE data IN
((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
(SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
(SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
(SELECT '"10"')::jsonb);
id | data
----+------
(0 rows)
SELECT WHERE 1 IN (1::int::bigint::int, 2::int::bigint::int);
--
(1 row)

SELECT WHERE 1 = ANY(ARRAY[1::int::bigint::int, 2::int::bigint::int]);
--
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------------------------+-------
SELECT * FROM test_squash_jsonb WHERE data IN +| 1
((SELECT $1)::jsonb, (SELECT $2)::jsonb, (SELECT $3)::jsonb,+|
(SELECT $4)::jsonb, (SELECT $5)::jsonb, (SELECT $6)::jsonb,+|
(SELECT $7)::jsonb, (SELECT $8)::jsonb, (SELECT $9)::jsonb,+|
(SELECT $10)::jsonb) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
query | calls
----------------------------------------------------+-------
SELECT WHERE $1 IN ($2 /*, ... */) | 2
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

--
-- CoerceViaIO
--
-- Create some dummy type to force CoerceViaIO
CREATE TYPE casttesttype;
CREATE FUNCTION casttesttype_in(cstring)
@ -349,15 +560,25 @@ SELECT * FROM test_squash_cast WHERE data IN
----+------
(0 rows)

SELECT * FROM test_squash_cast WHERE data = ANY (ARRAY
[1::int4::casttesttype, 2::int4::casttesttype, 3::int4::casttesttype,
4::int4::casttesttype, 5::int4::casttesttype, 6::int4::casttesttype,
7::int4::casttesttype, 8::int4::casttesttype, 9::int4::casttesttype,
10::int4::casttesttype, 11::int4::casttesttype]);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT * FROM test_squash_cast WHERE data IN +| 1
($1 /*, ... */::int4::casttesttype) |
SELECT * FROM test_squash_cast WHERE data IN +| 2
($1 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- Some casting expression are simplified to Const
CREATE TABLE test_squash_jsonb (id int, data jsonb);
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@ -366,8 +587,16 @@ SELECT pg_stat_statements_reset() IS NOT NULL AS t;

SELECT * FROM test_squash_jsonb WHERE data IN
(('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
( '"5"')::jsonb, ( '"6"')::jsonb, ( '"7"')::jsonb, ( '"8"')::jsonb,
( '"9"')::jsonb, ( '"10"')::jsonb);
('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
('"9"')::jsonb, ('"10"')::jsonb);
id | data
----+------
(0 rows)

SELECT * FROM test_squash_jsonb WHERE data = ANY (ARRAY
[('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
('"9"')::jsonb, ('"10"')::jsonb]);
id | data
----+------
(0 rows)
@ -375,28 +604,144 @@ SELECT * FROM test_squash_jsonb WHERE data IN
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT * FROM test_squash_jsonb WHERE data IN +| 1
(($1 /*, ... */)::jsonb) |
SELECT * FROM test_squash_jsonb WHERE data IN +| 2
($1 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- RelabelType
-- CoerceViaIO, SubLink instead of a Const. Will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
SELECT * FROM test_squash_jsonb WHERE data IN
((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
(SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
(SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
(SELECT '"10"')::jsonb);
id | data
----+------
(0 rows)

SELECT * FROM test_squash_jsonb WHERE data = ANY(ARRAY
[(SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
(SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
(SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
(SELECT '"10"')::jsonb]);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
------------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN ($1 /*, ... */::oid) | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
query | calls
----------------------------------------------------------------------+-------
SELECT * FROM test_squash_jsonb WHERE data IN +| 2
((SELECT $1)::jsonb, (SELECT $2)::jsonb, (SELECT $3)::jsonb,+|
(SELECT $4)::jsonb, (SELECT $5)::jsonb, (SELECT $6)::jsonb,+|
(SELECT $7)::jsonb, (SELECT $8)::jsonb, (SELECT $9)::jsonb,+|
(SELECT $10)::jsonb) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- Multiple CoerceViaIO wrapping a constant. Will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT WHERE 1 IN (1::text::int::text::int, 1::text::int::text::int);
--
(1 row)

SELECT WHERE 1 = ANY(ARRAY[1::text::int::text::int, 1::text::int::text::int]);
--
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------------------------+-------
SELECT WHERE $1 IN ($2::text::int::text::int, $3::text::int::text::int) | 2
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

--
-- RelabelType
--
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

-- if there is only one level of RelabelType, the list will be squashable
SELECT * FROM test_squash WHERE id IN
(1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
id | data
----+------
(0 rows)

SELECT ARRAY[1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid];
array
---------------------
{1,2,3,4,5,6,7,8,9}
(1 row)

-- if there is at least one element with multiple levels of RelabelType,
-- the list will not be squashable
SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid::int::oid);
id | data
----+------
(0 rows)

SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid, 2::oid::int::oid]);
id | data
----+------
(0 rows)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
--------------------------------------------------------------------+-------
SELECT * FROM test_squash WHERE id IN +| 1
($1 /*, ... */) |
SELECT * FROM test_squash WHERE id IN ($1::oid, $2::oid::int::oid) | 2
SELECT ARRAY[$1 /*, ... */] | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(4 rows)

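The RelabelType rule above is depth-sensitive: a single relabel still looks like a plain constant to the squashing logic, while stacked relabels do not. Side by side, under the same tables as the tests above:

SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid, 3::oid);   -- squashable: one RelabelType per element
SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid::int::oid); -- not squashable: nested relabels on one element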
--
-- edge cases
--
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

-- for nested arrays, only constants are squashed
SELECT ARRAY[
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
];
array
-----------------------------------------------------------------------------------------------
{{1,2,3,4,5,6,7,8,9,10},{1,2,3,4,5,6,7,8,9,10},{1,2,3,4,5,6,7,8,9,10},{1,2,3,4,5,6,7,8,9,10}}
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT ARRAY[ +| 1
ARRAY[$1 /*, ... */], +|
ARRAY[$2 /*, ... */], +|
ARRAY[$3 /*, ... */], +|
ARRAY[$4 /*, ... */] +|
] |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)

-- Test constants evaluation in a CTE, which was causing issues in the past
@ -409,23 +754,59 @@ FROM cte;
--------
(0 rows)

-- Simple array would be squashed as well
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

SELECT ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
array
------------------------
{1,2,3,4,5,6,7,8,9,10}
-- Rewritten as an OpExpr, so it will not be squashed
select where '1' IN ('1'::int, '2'::int::text);
--
(1 row)

-- Rewritten as an ArrayExpr, so it will be squashed
select where '1' IN ('1'::int, '2'::int);
--
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT ARRAY[$1 /*, ... */] | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
select where $1 IN ($2 /*, ... */) | 1
select where $1 IN ($2::int, $3::int::text) | 1
(3 rows)

SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)

-- Both of these queries will be rewritten as an ArrayExpr, so they
-- will be squashed, and have a similar queryId
select where '1' IN ('1'::int::text, '2'::int::text);
--
(1 row)

select where '1' = ANY (array['1'::int::text, '2'::int::text]);
--
(1 row)

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
select where $1 IN ($2 /*, ... */) | 2
(2 rows)

--
-- cleanup
--
DROP TABLE test_squash;
DROP TABLE test_float;
DROP TABLE test_squash_numeric;
DROP TABLE test_squash_bigint;
DROP TABLE test_squash_cast CASCADE;
DROP TABLE test_squash_jsonb;

@ -540,7 +540,7 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
-------+------+----------------------------------------------------
2 | 0 | DEALLOCATE $1
2 | 0 | DEALLOCATE ALL
2 | 2 | SELECT $1 AS a
2 | 2 | PREPARE stat_select AS SELECT $1 AS a
1 | 1 | SELECT $1 as a
1 | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(5 rows)

@ -144,7 +144,7 @@ typedef struct pgssHashKey
{
Oid userid; /* user OID */
Oid dbid; /* database OID */
uint64 queryid; /* query identifier */
int64 queryid; /* query identifier */
bool toplevel; /* query executed at top level */
} pgssHashKey;

@ -335,7 +335,7 @@ static PlannedStmt *pgss_planner(Query *parse,
const char *query_string,
int cursorOptions,
ParamListInfo boundParams);
static bool pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void pgss_ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction,
uint64 count);
@ -346,7 +346,7 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
QueryEnvironment *queryEnv,
DestReceiver *dest, QueryCompletion *qc);
static void pgss_store(const char *query, uint64 queryId,
static void pgss_store(const char *query, int64 queryId,
int query_location, int query_len,
pgssStoreKind kind,
double total_time, uint64 rows,
@ -370,7 +370,7 @@ static char *qtext_fetch(Size query_offset, int query_len,
char *buffer, Size buffer_size);
static bool need_gc_qtexts(void);
static void gc_qtexts(void);
static TimestampTz entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only);
static TimestampTz entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only);
static char *generate_normalized_query(JumbleState *jstate, const char *query,
int query_loc, int *query_len_p);
static void fill_in_constant_lengths(JumbleState *jstate, const char *query,
@ -852,7 +852,7 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate)
{
if (pgss_track_utility && IsA(query->utilityStmt, ExecuteStmt))
{
query->queryId = UINT64CONST(0);
query->queryId = INT64CONST(0);
return;
}
}
@ -899,7 +899,7 @@ pgss_planner(Query *parse,
*/
if (pgss_enabled(nesting_level)
&& pgss_track_planning && query_string
&& parse->queryId != UINT64CONST(0))
&& parse->queryId != INT64CONST(0))
{
instr_time start;
instr_time duration;
@ -989,26 +989,20 @@ pgss_planner(Query *parse,
/*
* ExecutorStart hook: start up tracking if needed
*/
static bool
static void
pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
bool plan_valid;

if (prev_ExecutorStart)
plan_valid = prev_ExecutorStart(queryDesc, eflags);
prev_ExecutorStart(queryDesc, eflags);
else
plan_valid = standard_ExecutorStart(queryDesc, eflags);

/* The plan may have become invalid during standard_ExecutorStart() */
if (!plan_valid)
return false;
standard_ExecutorStart(queryDesc, eflags);

/*
* If query has queryId zero, don't track it. This prevents double
* counting of optimizable statements that are directly contained in
* utility statements.
*/
if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != UINT64CONST(0))
if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != INT64CONST(0))
{
/*
* Set up to track total elapsed time in ExecutorRun. Make sure the
@ -1024,8 +1018,6 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
MemoryContextSwitchTo(oldcxt);
}
}

return true;
}

/*
@ -1076,9 +1068,9 @@ pgss_ExecutorFinish(QueryDesc *queryDesc)
static void
pgss_ExecutorEnd(QueryDesc *queryDesc)
{
uint64 queryId = queryDesc->plannedstmt->queryId;
int64 queryId = queryDesc->plannedstmt->queryId;

if (queryId != UINT64CONST(0) && queryDesc->totaltime &&
if (queryId != INT64CONST(0) && queryDesc->totaltime &&
pgss_enabled(nesting_level))
{
/*
@ -1119,7 +1111,7 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
DestReceiver *dest, QueryCompletion *qc)
{
Node *parsetree = pstmt->utilityStmt;
uint64 saved_queryId = pstmt->queryId;
int64 saved_queryId = pstmt->queryId;
int saved_stmt_location = pstmt->stmt_location;
int saved_stmt_len = pstmt->stmt_len;
bool enabled = pgss_track_utility && pgss_enabled(nesting_level);
@ -1139,7 +1131,7 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
* only.
*/
if (enabled)
pstmt->queryId = UINT64CONST(0);
pstmt->queryId = INT64CONST(0);

/*
* If it's an EXECUTE statement, we don't track it and don't increment the
@ -1286,7 +1278,7 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
* for the arrays in the Counters field.
*/
static void
pgss_store(const char *query, uint64 queryId,
pgss_store(const char *query, int64 queryId,
int query_location, int query_len,
pgssStoreKind kind,
double total_time, uint64 rows,
@ -1312,7 +1304,7 @@ pgss_store(const char *query, uint64 queryId,
* Nothing to do if compute_query_id isn't enabled and no other module
* computed a query identifier.
*/
if (queryId == UINT64CONST(0))
if (queryId == INT64CONST(0))
return;

/*
@ -1522,11 +1514,11 @@ pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
{
Oid userid;
Oid dbid;
uint64 queryid;
int64 queryid;

userid = PG_GETARG_OID(0);
dbid = PG_GETARG_OID(1);
queryid = (uint64) PG_GETARG_INT64(2);
queryid = PG_GETARG_INT64(2);

entry_reset(userid, dbid, queryid, false);

@ -1538,12 +1530,12 @@ pg_stat_statements_reset_1_11(PG_FUNCTION_ARGS)
{
Oid userid;
Oid dbid;
uint64 queryid;
int64 queryid;
bool minmax_only;

userid = PG_GETARG_OID(0);
dbid = PG_GETARG_OID(1);
queryid = (uint64) PG_GETARG_INT64(2);
queryid = PG_GETARG_INT64(2);
minmax_only = PG_GETARG_BOOL(3);

PG_RETURN_TIMESTAMPTZ(entry_reset(userid, dbid, queryid, minmax_only));
@ -2679,7 +2671,7 @@ if (e) { \
* Reset entries corresponding to parameters passed.
*/
static TimestampTz
entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
{
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
@ -2699,7 +2691,7 @@ entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)

stats_reset = GetCurrentTimestamp();

if (userid != 0 && dbid != 0 && queryid != UINT64CONST(0))
if (userid != 0 && dbid != 0 && queryid != INT64CONST(0))
{
/* If all the parameters are available, use the fast path. */
memset(&key, 0, sizeof(pgssHashKey));
@ -2722,7 +2714,7 @@ entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)

SINGLE_ENTRY_RESET(entry);
}
else if (userid != 0 || dbid != 0 || queryid != UINT64CONST(0))
else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0))
{
/* Reset entries corresponding to valid parameters. */
hash_seq_init(&hash_seq, pgss_hash);
@ -2818,17 +2810,13 @@ generate_normalized_query(JumbleState *jstate, const char *query,
{
char *norm_query;
int query_len = *query_len_p;
int i,
norm_query_buflen, /* Space allowed for norm_query */
int norm_query_buflen, /* Space allowed for norm_query */
len_to_wrt, /* Length (in bytes) to write */
quer_loc = 0, /* Source query byte location */
n_quer_loc = 0, /* Normalized query byte location */
last_off = 0, /* Offset from start for previous tok */
last_tok_len = 0; /* Length (in bytes) of that tok */
bool in_squashed = false; /* in a run of squashed consts? */
int skipped_constants = 0; /* Position adjustment of later
* constants after squashed ones */

int num_constants_replaced = 0;

/*
* Get constants' lengths (core system only gives us locations). Note
@ -2842,16 +2830,13 @@ generate_normalized_query(JumbleState *jstate, const char *query,
* certainly isn't more than 11 bytes, even if n reaches INT_MAX. We
* could refine that limit based on the max value of n for the current
* query, but it hardly seems worth any extra effort to do so.
*
* Note this also gives enough room for the commented-out ", ..." list
* syntax used by constant squashing.
*/
norm_query_buflen = query_len + jstate->clocations_count * 10;

/* Allocate result buffer */
norm_query = palloc(norm_query_buflen + 1);

for (i = 0; i < jstate->clocations_count; i++)
for (int i = 0; i < jstate->clocations_count; i++)
{
int off, /* Offset from start for cur tok */
tok_len; /* Length (in bytes) of that tok */
@ -2866,67 +2851,24 @@ generate_normalized_query(JumbleState *jstate, const char *query,
if (tok_len < 0)
continue; /* ignore any duplicates */

/* Copy next chunk (what precedes the next constant) */
len_to_wrt = off - last_off;
len_to_wrt -= last_tok_len;
Assert(len_to_wrt >= 0);
memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
n_quer_loc += len_to_wrt;

/*
* What to do next depends on whether we're squashing constant lists,
* and whether we're already in a run of such constants.
* And insert a param symbol in place of the constant token; and, if
* we have a squashable list, insert a placeholder comment starting
* from the list's second value.
*/
if (!jstate->clocations[i].squashed)
{
/*
* This location corresponds to a constant not to be squashed.
* Print what comes before the constant ...
*/
len_to_wrt = off - last_off;
len_to_wrt -= last_tok_len;
n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d%s",
num_constants_replaced + 1 + jstate->highest_extern_param_id,
jstate->clocations[i].squashed ? " /*, ... */" : "");
num_constants_replaced++;

Assert(len_to_wrt >= 0);

memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
n_quer_loc += len_to_wrt;

/* ... and then a param symbol replacing the constant itself */
n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d",
i + 1 + jstate->highest_extern_param_id - skipped_constants);

/* In case previous constants were merged away, stop doing that */
in_squashed = false;
}
else if (!in_squashed)
{
/*
* This location is the start position of a run of constants to be
* squashed, so we need to print the representation of starting a
* group of stashed constants.
*
* Print what comes before the constant ...
*/
len_to_wrt = off - last_off;
len_to_wrt -= last_tok_len;
Assert(len_to_wrt >= 0);
Assert(i + 1 < jstate->clocations_count);
Assert(jstate->clocations[i + 1].squashed);
memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
n_quer_loc += len_to_wrt;

/* ... and then start a run of squashed constants */
n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d /*, ... */",
i + 1 + jstate->highest_extern_param_id - skipped_constants);

/* The next location will match the block below, to end the run */
in_squashed = true;

skipped_constants++;
}
else
{
/*
* The second location of a run of squashable elements; this
* indicates its end.
*/
in_squashed = false;
}

/* Otherwise the constant is squashed away -- move forward */
/* move forward */
quer_loc = off + tok_len;
last_off = off;
last_tok_len = tok_len;
@ -3017,6 +2959,9 @@ fill_in_constant_lengths(JumbleState *jstate, const char *query,

Assert(loc >= 0);

if (locs[i].squashed)
continue; /* squashable list, ignore */

if (loc <= last_loc)
continue; /* Duplicate constant, ignore */

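The replacement logic above numbers placeholders sequentially after the highest extern parameter, so a query that mixes a bound parameter with an inline constant keeps $1 for the parameter and continues from there. A sketch of the observable effect (the normalized text in the comment is inferred from the numbering rule, not copied from test output):

-- hypothetical: $1 is an extern parameter, the literal 42 becomes $2
SELECT $1::int, 42 \bind 7 \g
SELECT query FROM pg_stat_statements WHERE query LIKE 'SELECT $1::int%';
-- expected shape: SELECT $1::int, $2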
@ -19,3 +19,19 @@ SELECT $1 \bind 'unnamed_val1' \g
\bind_named stmt1 'stmt1_val1' \g

SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";

-- Various parameter numbering patterns
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-- Unique query IDs with parameter numbers switched.
SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g
SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g
SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g
SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g
SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g
-- Two groups of two queries with the same query ID.
SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g
SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g

SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
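Because parameter numbers feed the query jumble, the five "switched" statements above should each get their own queryid, while each trailing pair shares one. A quick cross-check, written as an illustration rather than part of the suite:

SELECT count(DISTINCT queryid) FROM pg_stat_statements
WHERE query LIKE 'SELECT WHERE %';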
@ -334,6 +334,32 @@ END; $$;
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C", toplevel;

-- DO block --- multiple inner queries with separators
SET pg_stat_statements.track = 'all';
SET pg_stat_statements.track_utility = TRUE;
CREATE TABLE pgss_do_util_tab_1 (a int);
CREATE TABLE pgss_do_util_tab_2 (a int);
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
DO $$
DECLARE BEGIN
EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table';
EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2';
END $$;
SELECT toplevel, calls, rows, query FROM pg_stat_statements
WHERE toplevel IS FALSE
ORDER BY query COLLATE "C";
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-- Note the extra semicolon at the end of the query.
DO $$
DECLARE BEGIN
EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table;';
EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2;';
END $$;
SELECT toplevel, calls, rows, query FROM pg_stat_statements
WHERE toplevel IS FALSE
ORDER BY query COLLATE "C";
DROP TABLE pgss_do_util_tab_1, pgss_do_util_tab_2;

-- PL/pgSQL function - top-level tracking.
SET pg_stat_statements.track = 'top';
SET pg_stat_statements.track_utility = FALSE;
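With track = 'all', the statements executed inside the DO blocks are recorded with toplevel = false, which is what the two filtered queries above rely on. A hedged sketch of the complementary view, listing only the top-level entries:

SELECT toplevel, calls, query FROM pg_stat_statements
WHERE toplevel IS TRUE
ORDER BY query COLLATE "C";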
@ -20,11 +20,11 @@ SELECT 42;
SELECT 42;
SELECT 42;
SELECT plans, calls, rows, query FROM pg_stat_statements
WHERE query NOT LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
WHERE query NOT LIKE 'PREPARE%' ORDER BY query COLLATE "C";
-- for the prepared statement we expect at least one replan, but cache
-- invalidations could force more
SELECT plans >= 2 AND plans <= calls AS plans_ok, calls, rows, query FROM pg_stat_statements
WHERE query LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
WHERE query LIKE 'PREPARE%' ORDER BY query COLLATE "C";

-- Cleanup
DROP TABLE stats_plan_test;
@ -79,6 +79,22 @@ DEALLOCATE pgss_test;
SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
SELECT pg_stat_statements_reset() IS NOT NULL AS t;

-- normalization of constants and parameters, with constant locations
-- recorded one or more times.
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT WHERE '1' IN ('1'::int, '3'::int::text);
SELECT WHERE (1, 2) IN ((1, 2), (2, 3));
SELECT WHERE (3, 4) IN ((5, 6), (8, 7));
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

-- with the last element being an explicit function call with an argument, ensure
-- the normalization of the squashing interval is correct.
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT WHERE 1 IN (1, int4(1), int4(2));
SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2)]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

--
-- queries with locking clauses
--
@ -3,101 +3,160 @@
|
||||
--
|
||||
CREATE EXTENSION pg_stat_statements;
|
||||
|
||||
--
|
||||
-- Simple Lists
|
||||
--
|
||||
|
||||
CREATE TABLE test_squash (id int, data int);
|
||||
|
||||
-- IN queries
|
||||
|
||||
-- Normal scenario, too many simple constants for an IN query
|
||||
-- single element will not be squashed
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash WHERE id IN (1);
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3);
|
||||
SELECT ARRAY[1];
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9);
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
|
||||
-- more than 1 element in a list will be squashed
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3);
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4);
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5);
|
||||
SELECT ARRAY[1, 2, 3];
|
||||
SELECT ARRAY[1, 2, 3, 4];
|
||||
SELECT ARRAY[1, 2, 3, 4, 5];
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- built-in functions will be squashed
|
||||
-- the IN and ARRAY forms of this statement will have the same queryId
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT WHERE 1 IN (1, int4(1), int4(2), 2);
|
||||
SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2), 2]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- external parameters will not be squashed
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) \bind 1 2 3 4 5
|
||||
;
|
||||
SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) \bind 1 2 3 4 5
|
||||
;
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- neither are prepared statements
|
||||
-- the IN and ARRAY forms of this statement will have the same queryId
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
PREPARE p1(int, int, int, int, int) AS
|
||||
SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5);
|
||||
EXECUTE p1(1, 2, 3, 4, 5);
|
||||
DEALLOCATE p1;
|
||||
PREPARE p1(int, int, int, int, int) AS
|
||||
SELECT * FROM test_squash WHERE id = ANY(ARRAY[$1, $2, $3, $4, $5]);
|
||||
EXECUTE p1(1, 2, 3, 4, 5);
|
||||
DEALLOCATE p1;
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- More conditions in the query
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9) AND data = 2;
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) AND data = 2;
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11) AND data = 2;
|
||||
SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]) AND data = 2;
|
||||
SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) AND data = 2;
|
||||
SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) AND data = 2;
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- Multiple squashed intervals
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9)
|
||||
AND data IN (1, 2, 3, 4, 5, 6, 7, 8, 9);
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
|
||||
AND data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
|
||||
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
|
||||
AND data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
|
||||
SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9])
|
||||
AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]);
|
||||
SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
|
||||
AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
|
||||
SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
|
||||
AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- No constants simplification for OpExpr
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
|
||||
-- In the following two queries the operator expressions (+) and (@) have
|
||||
-- different oppno, and will be given different query_id if squashed, even though
|
||||
-- the normalized query will be the same
|
||||
-- No constants squashing for OpExpr
|
||||
-- The IN and ARRAY forms of this statement will have the same queryId
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash WHERE id IN
|
||||
(1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9);
|
||||
SELECT * FROM test_squash WHERE id IN
|
||||
(@ '-1', @ '-2', @ '-3', @ '-4', @ '-5', @ '-6', @ '-7', @ '-8', @ '-9');
|
||||
SELECT * FROM test_squash WHERE id = ANY(ARRAY
|
||||
[1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9]);
|
||||
SELECT * FROM test_squash WHERE id = ANY(ARRAY
|
||||
[@ '-1', @ '-2', @ '-3', @ '-4', @ '-5', @ '-6', @ '-7', @ '-8', @ '-9']);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
--
|
||||
-- FuncExpr
|
||||
--
|
||||
|
||||
-- Verify multiple type representation end up with the same query_id
|
||||
CREATE TABLE test_float (data float);
|
||||
-- The casted ARRAY expressions will have the same queryId as the IN clause
|
||||
-- form of the query
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT data FROM test_float WHERE data IN (1, 2);
|
||||
SELECT data FROM test_float WHERE data IN (1, '2');
|
||||
SELECT data FROM test_float WHERE data IN ('1', 2);
|
||||
SELECT data FROM test_float WHERE data IN ('1', '2');
|
||||
SELECT data FROM test_float WHERE data IN (1.0, 1.0);
|
||||
SELECT data FROM test_float WHERE data = ANY(ARRAY['1'::double precision, '2'::double precision]);
|
||||
SELECT data FROM test_float WHERE data = ANY(ARRAY[1.0::double precision, 1.0::double precision]);
|
||||
SELECT data FROM test_float WHERE data = ANY(ARRAY[1, 2]);
|
||||
SELECT data FROM test_float WHERE data = ANY(ARRAY[1, '2']);
|
||||
SELECT data FROM test_float WHERE data = ANY(ARRAY['1', 2]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- Numeric type, implicit cast is squashed
|
||||
CREATE TABLE test_squash_numeric (id int, data numeric(5, 2));
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash_numeric WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
|
||||
SELECT * FROM test_squash_numeric WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- Bigint, implicit cast is squashed
|
||||
CREATE TABLE test_squash_bigint (id int, data bigint);
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash_bigint WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
|
||||
SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- Bigint, explicit cast is not squashed
|
||||
-- Bigint, explicit cast is squashed
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash_bigint WHERE data IN
|
||||
(1::bigint, 2::bigint, 3::bigint, 4::bigint, 5::bigint, 6::bigint,
|
||||
7::bigint, 8::bigint, 9::bigint, 10::bigint, 11::bigint);
|
||||
SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[
|
||||
1::bigint, 2::bigint, 3::bigint, 4::bigint, 5::bigint, 6::bigint,
|
||||
7::bigint, 8::bigint, 9::bigint, 10::bigint, 11::bigint]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- Bigint, long tokens with parenthesis
|
||||
-- Bigint, long tokens with parenthesis, will not squash
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash_bigint WHERE id IN
|
||||
(abs(100), abs(200), abs(300), abs(400), abs(500), abs(600), abs(700),
|
||||
abs(800), abs(900), abs(1000), ((abs(1100))));
|
||||
SELECT * FROM test_squash_bigint WHERE id = ANY(ARRAY[
|
||||
abs(100), abs(200), abs(300), abs(400), abs(500), abs(600), abs(700),
|
||||
abs(800), abs(900), abs(1000), ((abs(1100)))]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
-- CoerceViaIO, SubLink instead of a Const
|
||||
CREATE TABLE test_squash_jsonb (id int, data jsonb);
|
||||
-- Multiple FuncExpr's. Will not squash
|
||||
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
|
||||
SELECT * FROM test_squash_jsonb WHERE data IN
|
||||
((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
|
||||
(SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
|
||||
(SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
|
||||
(SELECT '"10"')::jsonb);
|
||||
SELECT WHERE 1 IN (1::int::bigint::int, 2::int::bigint::int);
|
||||
SELECT WHERE 1 = ANY(ARRAY[1::int::bigint::int, 2::int::bigint::int]);
|
||||
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
|
||||
|
||||
--
|
||||
-- CoerceViaIO
|
||||
--
|
||||
|
||||
-- Create some dummy type to force CoerceViaIO
|
||||
CREATE TYPE casttesttype;
|
||||
@@ -141,19 +200,73 @@ SELECT * FROM test_squash_cast WHERE data IN
4::int4::casttesttype, 5::int4::casttesttype, 6::int4::casttesttype,
7::int4::casttesttype, 8::int4::casttesttype, 9::int4::casttesttype,
10::int4::casttesttype, 11::int4::casttesttype);
SELECT * FROM test_squash_cast WHERE data = ANY (ARRAY
[1::int4::casttesttype, 2::int4::casttesttype, 3::int4::casttesttype,
4::int4::casttesttype, 5::int4::casttesttype, 6::int4::casttesttype,
7::int4::casttesttype, 8::int4::casttesttype, 9::int4::casttesttype,
10::int4::casttesttype, 11::int4::casttesttype]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

-- Some casting expressions are simplified to Const
CREATE TABLE test_squash_jsonb (id int, data jsonb);
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash_jsonb WHERE data IN
(('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
( '"5"')::jsonb, ( '"6"')::jsonb, ( '"7"')::jsonb, ( '"8"')::jsonb,
( '"9"')::jsonb, ( '"10"')::jsonb);
('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
('"9"')::jsonb, ('"10"')::jsonb);
SELECT * FROM test_squash_jsonb WHERE data = ANY (ARRAY
[('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
('"9"')::jsonb, ('"10"')::jsonb]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

-- RelabelType
-- CoerceViaIO, SubLink instead of a Const. Will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
SELECT * FROM test_squash_jsonb WHERE data IN
((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
(SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
(SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
(SELECT '"10"')::jsonb);
SELECT * FROM test_squash_jsonb WHERE data = ANY(ARRAY
[(SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
(SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
(SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
(SELECT '"10"')::jsonb]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

-- Multiple CoerceViaIO wrapping a constant. Will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT WHERE 1 IN (1::text::int::text::int, 1::text::int::text::int);
SELECT WHERE 1 = ANY(ARRAY[1::text::int::text::int, 1::text::int::text::int]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

--
-- RelabelType
--

SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-- if there is only one level of RelabelType, the list will be squashable
SELECT * FROM test_squash WHERE id IN
(1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
SELECT ARRAY[1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid];
-- if there is at least one element with multiple levels of RelabelType,
-- the list will not be squashable
SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid::int::oid);
SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid, 2::oid::int::oid]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

--
-- edge cases
--

SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-- for nested arrays, only constants are squashed
SELECT ARRAY[
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
];
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

-- Test constants evaluation in a CTE, which was causing issues in the past
@@ -163,8 +276,26 @@ WITH cte AS (
SELECT ARRAY['a', 'b', 'c', const::varchar] AS result
FROM cte;

-- Simple array would be squashed as well
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
-- Rewritten as an OpExpr, so it will not be squashed
select where '1' IN ('1'::int, '2'::int::text);
-- Rewritten as an ArrayExpr, so it will be squashed
select where '1' IN ('1'::int, '2'::int);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";

SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-- Both of these queries will be rewritten as an ArrayExpr, so they
-- will be squashed, and have a similar queryId
select where '1' IN ('1'::int::text, '2'::int::text);
select where '1' = ANY (array['1'::int::text, '2'::int::text]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
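
A sketch of the effect being tested, separate from the regression file: with
squashing, equal-shaped queries that differ only in the length of a constant list
should collapse into a single pg_stat_statements entry. This assumes a table
test_squash(id int) and that the normalized text shows the squashed list as a
single placeholder (expected to look roughly like "$1 /*, ... */"):

-- Hypothetical demonstration; not part of the test suite above.
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5);
SELECT * FROM test_squash WHERE id IN (6, 7, 8, 9, 10, 11);
-- Both statements are expected to share one entry with calls = 2, since
-- the differing list lengths are squashed away in the normalized query.
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
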
--
-- cleanup
--
DROP TABLE test_squash;
DROP TABLE test_float;
DROP TABLE test_squash_numeric;
DROP TABLE test_squash_bigint;
DROP TABLE test_squash_cast CASCADE;
DROP TABLE test_squash_jsonb;

@@ -577,7 +577,7 @@ connect_pg_server(ForeignServer *server, UserMapping *user)
len = pg_b64_enc_len(sizeof(MyProcPort->scram_ClientKey));
/* don't forget the zero-terminator */
values[n] = palloc0(len + 1);
encoded_len = pg_b64_encode((const char *) MyProcPort->scram_ClientKey,
encoded_len = pg_b64_encode(MyProcPort->scram_ClientKey,
                            sizeof(MyProcPort->scram_ClientKey),
                            (char *) values[n], len);
if (encoded_len < 0)
@@ -588,7 +588,7 @@ connect_pg_server(ForeignServer *server, UserMapping *user)
len = pg_b64_enc_len(sizeof(MyProcPort->scram_ServerKey));
/* don't forget the zero-terminator */
values[n] = palloc0(len + 1);
encoded_len = pg_b64_encode((const char *) MyProcPort->scram_ServerKey,
encoded_len = pg_b64_encode(MyProcPort->scram_ServerKey,
                            sizeof(MyProcPort->scram_ServerKey),
                            (char *) values[n], len);
if (encoded_len < 0)

@@ -2565,6 +2565,70 @@ SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = f
96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
(10 rows)

EXPLAIN (VERBOSE, COSTS OFF)
SELECT * FROM ft1, ft4, ft5, local_tbl WHERE ft1.c1 = ft4.c1 AND ft1.c1 = ft5.c1
AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft5.c1 < 100 FOR UPDATE;
                                          QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 LockRows
   Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, local_tbl.c1, local_tbl.c2, local_tbl.c3, ft1.*, ft4.*, ft5.*, local_tbl.ctid
   ->  Merge Join
         Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft4.c1, ft4.c2, ft4.c3, ft5.c1, ft5.c2, ft5.c3, local_tbl.c1, local_tbl.c2, local_tbl.c3, ft1.*, ft4.*, ft5.*, local_tbl.ctid
         Merge Cond: (local_tbl.c1 = ft1.c2)
         ->  Index Scan using local_tbl_pkey on public.local_tbl
               Output: local_tbl.c1, local_tbl.c2, local_tbl.c3, local_tbl.ctid
         ->  Sort
               Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft4.c1, ft4.c2, ft4.c3, ft4.*, ft5.c1, ft5.c2, ft5.c3, ft5.*
               Sort Key: ft1.c2
               ->  Foreign Scan
                     Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft4.c1, ft4.c2, ft4.c3, ft4.*, ft5.c1, ft5.c2, ft5.c3, ft5.*
                     Relations: ((public.ft1) INNER JOIN (public.ft4)) INNER JOIN (public.ft5)
                     Remote SQL: SELECT r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2.c1, r2.c2, r2.c3, CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2.c1, r2.c2, r2.c3) END, r3.c1, r3.c2, r3.c3, CASE WHEN (r3.*)::text IS NOT NULL THEN ROW(r3.c1, r3.c2, r3.c3) END FROM (("S 1"."T 1" r1 INNER JOIN "S 1"."T 3" r2 ON (((r1."C 1" = r2.c1)) AND ((r1."C 1" < 100)))) INNER JOIN "S 1"."T 4" r3 ON (((r1."C 1" = r3.c1)) AND ((r3.c1 < 100)))) FOR UPDATE OF r1 FOR UPDATE OF r2 FOR UPDATE OF r3
                     ->  Merge Join
                           Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft4.c1, ft4.c2, ft4.c3, ft4.*, ft5.c1, ft5.c2, ft5.c3, ft5.*
                           Merge Cond: (ft1.c1 = ft5.c1)
                           ->  Merge Join
                                 Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*, ft4.c1, ft4.c2, ft4.c3, ft4.*
                                 Merge Cond: (ft1.c1 = ft4.c1)
                                 ->  Sort
                                       Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*
                                       Sort Key: ft1.c1
                                       ->  Foreign Scan on public.ft1
                                             Output: ft1.c1, ft1.c2, ft1.c3, ft1.c4, ft1.c5, ft1.c6, ft1.c7, ft1.c8, ft1.*
                                             Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" < 100)) FOR UPDATE
                                 ->  Sort
                                       Output: ft4.c1, ft4.c2, ft4.c3, ft4.*
                                       Sort Key: ft4.c1
                                       ->  Foreign Scan on public.ft4
                                             Output: ft4.c1, ft4.c2, ft4.c3, ft4.*
                                             Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 3" FOR UPDATE
                           ->  Sort
                                 Output: ft5.c1, ft5.c2, ft5.c3, ft5.*
                                 Sort Key: ft5.c1
                                 ->  Foreign Scan on public.ft5
                                       Output: ft5.c1, ft5.c2, ft5.c3, ft5.*
                                       Remote SQL: SELECT c1, c2, c3 FROM "S 1"."T 4" WHERE ((c1 < 100)) FOR UPDATE
(38 rows)

SELECT * FROM ft1, ft4, ft5, local_tbl WHERE ft1.c1 = ft4.c1 AND ft1.c1 = ft5.c1
AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft5.c1 < 100 FOR UPDATE;
c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 | c1 | c2 | c3 | c1 | c2 | c3 | c1 | c2 | c3
----+----+-------+------------------------------+--------------------------+----+------------+-----+----+----+--------+----+----+--------+----+----+------
12 | 2 | 00012 | Tue Jan 13 00:00:00 1970 PST | Tue Jan 13 00:00:00 1970 | 2 | 2 | foo | 12 | 13 | AAA012 | 12 | 13 | AAA012 | 2 | 2 | 0002
42 | 2 | 00042 | Thu Feb 12 00:00:00 1970 PST | Thu Feb 12 00:00:00 1970 | 2 | 2 | foo | 42 | 43 | AAA042 | 42 | 43 | AAA042 | 2 | 2 | 0002
72 | 2 | 00072 | Sat Mar 14 00:00:00 1970 PST | Sat Mar 14 00:00:00 1970 | 2 | 2 | foo | 72 | 73 | AAA072 | 72 | 73 | | 2 | 2 | 0002
24 | 4 | 00024 | Sun Jan 25 00:00:00 1970 PST | Sun Jan 25 00:00:00 1970 | 4 | 4 | foo | 24 | 25 | AAA024 | 24 | 25 | AAA024 | 4 | 4 | 0004
54 | 4 | 00054 | Tue Feb 24 00:00:00 1970 PST | Tue Feb 24 00:00:00 1970 | 4 | 4 | foo | 54 | 55 | AAA054 | 54 | 55 | | 4 | 4 | 0004
84 | 4 | 00084 | Thu Mar 26 00:00:00 1970 PST | Thu Mar 26 00:00:00 1970 | 4 | 4 | foo | 84 | 85 | AAA084 | 84 | 85 | AAA084 | 4 | 4 | 0004
96 | 6 | 00096 | Tue Apr 07 00:00:00 1970 PST | Tue Apr 07 00:00:00 1970 | 6 | 6 | foo | 96 | 97 | AAA096 | 96 | 97 | AAA096 | 6 | 6 | 0006
36 | 6 | 00036 | Fri Feb 06 00:00:00 1970 PST | Fri Feb 06 00:00:00 1970 | 6 | 6 | foo | 36 | 37 | AAA036 | 36 | 37 | | 6 | 6 | 0006
66 | 6 | 00066 | Sun Mar 08 00:00:00 1970 PST | Sun Mar 08 00:00:00 1970 | 6 | 6 | foo | 66 | 67 | AAA066 | 66 | 67 | AAA066 | 6 | 6 | 0006
6 | 6 | 00006 | Wed Jan 07 00:00:00 1970 PST | Wed Jan 07 00:00:00 1970 | 6 | 6 | foo | 6 | 7 | AAA006 | 6 | 7 | AAA006 | 6 | 6 | 0006
48 | 8 | 00048 | Wed Feb 18 00:00:00 1970 PST | Wed Feb 18 00:00:00 1970 | 8 | 8 | foo | 48 | 49 | AAA048 | 48 | 49 | AAA048 | 8 | 8 | 0008
18 | 8 | 00018 | Mon Jan 19 00:00:00 1970 PST | Mon Jan 19 00:00:00 1970 | 8 | 8 | foo | 18 | 19 | AAA018 | 18 | 19 | | 8 | 8 | 0008
78 | 8 | 00078 | Fri Mar 20 00:00:00 1970 PST | Fri Mar 20 00:00:00 1970 | 8 | 8 | foo | 78 | 79 | AAA078 | 78 | 79 | AAA078 | 8 | 8 | 0008
(13 rows)

RESET enable_nestloop;
RESET enable_hashjoin;
-- test that add_paths_with_pathkeys_for_rel() arranges for the epq_path to

@@ -240,6 +240,7 @@ typedef struct PgFdwDirectModifyState
PGresult *result;        /* result for query */
int num_tuples;          /* # of result tuples */
int next_tuple;          /* index of next one to return */
MemoryContextCallback result_cb; /* ensures result will get freed */
Relation resultRel;      /* relcache entry for the target relation */
AttrNumber *attnoMap;    /* array of attnums of input user columns */
AttrNumber ctidAttno;    /* attnum of input ctid column */
@@ -2670,6 +2671,17 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
dmstate = (PgFdwDirectModifyState *) palloc0(sizeof(PgFdwDirectModifyState));
node->fdw_state = dmstate;

/*
 * We use a memory context callback to ensure that the dmstate's PGresult
 * (if any) will be released, even if the query fails somewhere that's
 * outside our control.  The callback is always armed for the duration of
 * the query; this relies on PQclear(NULL) being a no-op.
 */
dmstate->result_cb.func = (MemoryContextCallbackFunction) PQclear;
dmstate->result_cb.arg = NULL;
MemoryContextRegisterResetCallback(CurrentMemoryContext,
                                   &dmstate->result_cb);

/*
 * Identify which user to do the remote access as.  This should match what
 * ExecCheckPermissions() does.
@@ -2817,7 +2829,13 @@ postgresEndDirectModify(ForeignScanState *node)
    return;

/* Release PGresult */
PQclear(dmstate->result);
if (dmstate->result)
{
    PQclear(dmstate->result);
    dmstate->result = NULL;
    /* ... and don't forget to disable the callback */
    dmstate->result_cb.arg = NULL;
}

/* Release remote connection */
ReleaseConnection(dmstate->conn);
@@ -4591,13 +4609,17 @@ execute_dml_stmt(ForeignScanState *node)
/*
 * Get the result, and check for success.
 *
 * We don't use a PG_TRY block here, so be careful not to throw error
 * without releasing the PGresult.
 * We use a memory context callback to ensure that the PGresult will be
 * released, even if the query fails somewhere that's outside our control.
 * The callback is already registered, just need to fill in its arg.
 */
Assert(dmstate->result == NULL);
dmstate->result = pgfdw_get_result(dmstate->conn);
dmstate->result_cb.arg = dmstate->result;

if (PQresultStatus(dmstate->result) !=
    (dmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
    pgfdw_report_error(ERROR, dmstate->result, dmstate->conn, true,
    pgfdw_report_error(ERROR, dmstate->result, dmstate->conn, false,
                       dmstate->query);

/* Get the number of rows affected. */
@@ -4641,30 +4663,16 @@ get_returning_data(ForeignScanState *node)
}
else
{
    /*
     * On error, be sure to release the PGresult on the way out.  Callers
     * do not have PG_TRY blocks to ensure this happens.
     */
    PG_TRY();
    {
        HeapTuple newtup;

        newtup = make_tuple_from_result_row(dmstate->result,
                                            dmstate->next_tuple,
                                            dmstate->rel,
                                            dmstate->attinmeta,
                                            dmstate->retrieved_attrs,
                                            node,
                                            dmstate->temp_cxt);
        ExecStoreHeapTuple(newtup, slot, false);
    }
    PG_CATCH();
    {
        PQclear(dmstate->result);
        PG_RE_THROW();
    }
    PG_END_TRY();
    HeapTuple newtup;

    newtup = make_tuple_from_result_row(dmstate->result,
                                        dmstate->next_tuple,
                                        dmstate->rel,
                                        dmstate->attinmeta,
                                        dmstate->retrieved_attrs,
                                        node,
                                        dmstate->temp_cxt);
    ExecStoreHeapTuple(newtup, slot, false);
/* Get the updated/deleted tuple. */
if (dmstate->rel)
    resultSlot = slot;

@@ -715,6 +715,11 @@ SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = f
AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
SELECT * FROM ft1, ft2, ft4, ft5, local_tbl WHERE ft1.c1 = ft2.c1 AND ft1.c2 = ft4.c1
AND ft1.c2 = ft5.c1 AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft2.c1 < 100 FOR UPDATE;
EXPLAIN (VERBOSE, COSTS OFF)
SELECT * FROM ft1, ft4, ft5, local_tbl WHERE ft1.c1 = ft4.c1 AND ft1.c1 = ft5.c1
AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft5.c1 < 100 FOR UPDATE;
SELECT * FROM ft1, ft4, ft5, local_tbl WHERE ft1.c1 = ft4.c1 AND ft1.c1 = ft5.c1
AND ft1.c2 = local_tbl.c1 AND ft1.c1 < 100 AND ft5.c1 < 100 FOR UPDATE;
RESET enable_nestloop;
RESET enable_hashjoin;

@@ -1,5 +1,5 @@

# Copyright (c) 2024, PostgreSQL Global Development Group
# Copyright (c) 2024-2025, PostgreSQL Global Development Group

use strict;
use warnings FATAL => 'all';

@@ -1582,7 +1582,7 @@
<structfield>rolpassword</structfield> <type>text</type>
</para>
<para>
Password (possibly encrypted); null if none. The format depends
Encrypted password; null if none. The format depends
on the form of encryption used.
</para></entry>
</row>
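
A quick way to see the stored format this paragraph describes (a hypothetical
superuser session; the exact rows depend on how each role's password was encrypted):

-- Hypothetical illustration: rolpassword is visible only to superusers.
SELECT rolname, left(rolpassword, 14) AS password_prefix
FROM pg_authid
WHERE rolpassword IS NOT NULL;
-- A SCRAM-encrypted password begins with the literal prefix SCRAM-SHA-256$
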
@@ -1627,11 +1627,6 @@ SCRAM-SHA-256$<replaceable><iteration count></replaceable>:<replaceable>&l
<replaceable>ServerKey</replaceable> are in Base64 encoded format. This format is
the same as that specified by <ulink url="https://datatracker.ietf.org/doc/html/rfc5803">RFC 5803</ulink>.
</para>

<para>
A password that does not follow either of those formats is assumed to be
unencrypted.
</para>
</sect1>

@@ -2629,7 +2624,6 @@ SCRAM-SHA-256$<replaceable><iteration count></replaceable>:<replaceable>&l
</para>
<para>
Has the constraint been validated?
Currently, can be false only for foreign keys and CHECK constraints
</para></entry>
</row>

@@ -140,7 +140,7 @@
An example of what this file might look like is:
<programlisting>
# This is a comment
log_connections = yes
log_connections = all
log_destination = 'syslog'
search_path = '"$user", public'
shared_buffers = 128MB
@@ -337,7 +337,7 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter
<option>-c name=value</option> command-line parameter, or its equivalent
<option>--name=value</option> variation. For example,
<programlisting>
postgres -c log_connections=yes --log-destination='syslog'
postgres -c log_connections=all --log-destination='syslog'
</programlisting>
Settings provided in this way override those set via
<filename>postgresql.conf</filename> or <command>ALTER SYSTEM</command>,
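
The same spelling change applies when setting the parameter at run time; a minimal
sketch, assuming superuser privileges:

ALTER SYSTEM SET log_connections = 'all';
SELECT pg_reload_conf();
SHOW log_connections;
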
@@ -1155,6 +1155,22 @@ include_dir 'conf.d'
</listitem>
</varlistentry>

<varlistentry id="guc-md5-password-warnings" xreflabel="md5_password_warnings">
<term><varname>md5_password_warnings</varname> (<type>boolean</type>)
<indexterm>
<primary><varname>md5_password_warnings</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Controls whether a <literal>WARNING</literal> about MD5 password
deprecation is produced when a <command>CREATE ROLE</command> or
<command>ALTER ROLE</command> statement sets an MD5-encrypted password.
The default value is <literal>on</literal>.
</para>
</listitem>
</varlistentry>
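
A sketch of the behavior the new GUC controls (the role name and MD5 hash below are
placeholders; a real MD5 password is the string "md5" followed by 32 hex digits):

SET md5_password_warnings = off;
CREATE ROLE demo_role PASSWORD 'md50123456789abcdef0123456789abcdef';
-- With the default (on), the CREATE ROLE above instead emits a WARNING
-- that MD5 passwords are deprecated.
RESET md5_password_warnings;
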

<varlistentry id="guc-krb-server-keyfile" xreflabel="krb_server_keyfile">
<term><varname>krb_server_keyfile</varname> (<type>string</type>)
<indexterm>
@@ -7511,12 +7527,12 @@ local0.* /var/log/postgresql
<entry><literal>setup_durations</literal></entry>
<entry>
Logs the time spent establishing the connection and setting up the
backend at the time the connection is ready to execute its first
query. The log message includes the total setup duration, starting
from the postmaster accepting the incoming connection and ending
when the connection is ready for query. It also includes the time
it took to fork the new backend and the time it took to
authenticate the user.
backend until the connection is ready to execute its first
query. The log message includes three durations: the total
setup duration (starting from the postmaster accepting the
incoming connection and ending when the connection is ready
for query), the time it took to fork the new backend, and
the time it took to authenticate the user.
</entry>
</row>

@@ -7907,10 +7923,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
</listitem>
</varlistentry>

<varlistentry id="guc-log-lock-failure" xreflabel="log_lock_failure">
<term><varname>log_lock_failure</varname> (<type>boolean</type>)
<varlistentry id="guc-log-lock-failures" xreflabel="log_lock_failures">
<term><varname>log_lock_failures</varname> (<type>boolean</type>)
<indexterm>
<primary><varname>log_lock_failure</varname> configuration parameter</primary>
<primary><varname>log_lock_failures</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
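
A sketch of how the renamed parameter is typically exercised (the table name is a
placeholder; the failed NOWAIT lock acquisition is what would be logged):

SET log_lock_failures = on;
BEGIN;
SELECT * FROM accounts FOR UPDATE NOWAIT;  -- logged if the lock cannot be taken
ROLLBACK;
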
@@ -8118,22 +8134,6 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
</listitem>
</varlistentry>

<varlistentry id="guc-md5-password-warnings" xreflabel="md5_password_warnings">
<term><varname>md5_password_warnings</varname> (<type>boolean</type>)
<indexterm>
<primary><varname>md5_password_warnings</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
<para>
Controls whether a <literal>WARNING</literal> about MD5 password
deprecation is produced when a <command>CREATE ROLE</command> or
<command>ALTER ROLE</command> statement sets an MD5-encrypted password.
The default value is <literal>on</literal>.
</para>
</listitem>
</varlistentry>

</variablelist>
</sect2>
<sect2 id="runtime-config-logging-csvlog">

@@ -2223,8 +2223,9 @@ REVOKE ALL ON accounts FROM PUBLIC;
<para>
Allows <command>VACUUM</command>, <command>ANALYZE</command>,
<command>CLUSTER</command>, <command>REFRESH MATERIALIZED VIEW</command>,
<command>REINDEX</command>, and <command>LOCK TABLE</command> on a
relation.
<command>REINDEX</command>, <command>LOCK TABLE</command>,
and database object statistics manipulation functions
(see <xref linkend="functions-admin-statsmod"/>) on a relation.
</para>
</listitem>
</varlistentry>
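
A sketch of what the widened MAINTAIN privilege now covers (role and table names
are placeholders, and the statistics-function call is illustrative; see the
functions-admin-statsmod section for the exact signatures):

GRANT MAINTAIN ON mytable TO maint_role;
SET ROLE maint_role;
VACUUM mytable;
SELECT pg_clear_relation_stats('public', 'mytable');
RESET ROLE;
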

@@ -10905,7 +10905,7 @@ SELECT date_bin('15 minutes', TIMESTAMP '2020-02-11 15:44:17', TIMESTAMP '2001-0
</sect2>

<sect2 id="functions-datetime-zoneconvert">
<title><literal>AT TIME ZONE and AT LOCAL</literal></title>
<title><literal>AT TIME ZONE</literal> and <literal>AT LOCAL</literal></title>

<indexterm>
<primary>time zone</primary>
@@ -28663,143 +28663,6 @@ acl | {postgres=arwdDxtm/postgres,foo=r/postgres}
</para></entry>
</row>

<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm>
<primary>pg_get_process_memory_contexts</primary>
</indexterm>
<function>pg_get_process_memory_contexts</function> ( <parameter>pid</parameter> <type>integer</type>, <parameter>summary</parameter> <type>boolean</type>, <parameter>timeout</parameter> <type>float</type> )
<returnvalue>setof record</returnvalue>
( <parameter>name</parameter> <type>text</type>,
<parameter>ident</parameter> <type>text</type>,
<parameter>type</parameter> <type>text</type>,
<parameter>path</parameter> <type>integer[]</type>,
<parameter>level</parameter> <type>integer</type>,
<parameter>total_bytes</parameter> <type>bigint</type>,
<parameter>total_nblocks</parameter> <type>bigint</type>,
<parameter>free_bytes</parameter> <type>bigint</type>,
<parameter>free_chunks</parameter> <type>bigint</type>,
<parameter>used_bytes</parameter> <type>bigint</type>,
<parameter>num_agg_contexts</parameter> <type>integer</type>,
<parameter>stats_timestamp</parameter> <type>timestamptz</type> )
</para>
<para>
This function handles requests to display the memory contexts of a
<productname>PostgreSQL</productname> process with the specified
process ID. The function can be used to send requests to backends as
well as <glossterm linkend="glossary-auxiliary-proc">auxiliary processes</glossterm>.
</para>
<para>
The returned record contains extended statistics per each memory
context:
<itemizedlist spacing="compact">
<listitem>
<para>
<parameter>name</parameter> - The name of the memory context.
</para>
</listitem>
<listitem>
<para>
<parameter>ident</parameter> - Memory context ID (if any).
</para>
</listitem>
<listitem>
<para>
<parameter>type</parameter> - The type of memory context, possible
values are: AllocSet, Generation, Slab and Bump.
</para>
</listitem>
<listitem>
<para>
<parameter>path</parameter> - Memory contexts are organized in a
tree model with TopMemoryContext as the root, and all other memory
contexts as nodes in the tree. The <parameter>path</parameter>
displays the path from the root to the current memory context. The
path is limited to 100 children per node, which each node limited
to a max depth of 100, to preserve memory during reporting. The
printed path will also be limited to 100 nodes counting from the
TopMemoryContext.
</para>
</listitem>
<listitem>
<para>
<parameter>level</parameter> - The level in the tree of the current
memory context.
</para>
</listitem>
<listitem>
<para>
<parameter>total_bytes</parameter> - The total number of bytes
allocated to this memory context.
</para>
</listitem>
<listitem>
<para>
<parameter>total_nblocks</parameter> - The total number of blocks
used for the allocated memory.
</para>
</listitem>
<listitem>
<para>
<parameter>free_bytes</parameter> - The amount of free memory in
this memory context.
</para>
</listitem>
<listitem>
<para>
<parameter>free_chunks</parameter> - The number of chunks that
<parameter>free_bytes</parameter> corresponds to.
</para>
</listitem>
<listitem>
<para>
<parameter>used_bytes</parameter> - The total number of bytes
currently occupied.
</para>
</listitem>
<listitem>
<para>
<parameter>num_agg_contexts</parameter> - The number of memory
contexts aggregated in the displayed statistics.
</para>
</listitem>
<listitem>
<para>
<parameter>stats_timestamp</parameter> - When the statistics were
extracted from the process.
</para>
</listitem>
</itemizedlist>
</para>
<para>
When <parameter>summary</parameter> is <literal>true</literal>, statistics
for memory contexts at levels 1 and 2 are displayed, with level 1
representing the root node (i.e., <literal>TopMemoryContext</literal>).
Statistics for contexts on level 2 and below are aggregates of all
child contexts' statistics, where <literal>num_agg_contexts</literal>
indicate the number aggregated child contexts. When
<parameter>summary</parameter> is <literal>false</literal>,
<literal>the num_agg_contexts</literal> value is <literal>1</literal>,
indicating that individual statistics are being displayed.
</para>
<para>
Busy processes can delay reporting memory context statistics,
<parameter>timeout</parameter> specifies the number of seconds
to wait for updated statistics. <parameter>timeout</parameter> can be
specified in fractions of a second.
</para>
<para>
After receiving memory context statistics from the target process, it
returns the results as one row per context. If all the contexts don't
fit within the pre-determined size limit, the remaining context
statistics are aggregated and a cumulative total is displayed. The
<literal>num_agg_contexts</literal> column indicates the number of
contexts aggregated in the displayed statistics. When
<literal>num_agg_contexts</literal> is <literal>1</literal> it means
that the context statistics are displayed separately.
</para></entry>
</row>

<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm>
@@ -28939,40 +28802,6 @@ LOG: Grand total: 1651920 bytes in 201 blocks; 622360 free (88 chunks); 1029560
because it may generate a large number of log messages.
</para>

<para>
<function>pg_get_process_memory_contexts</function> can be used to request
memory contexts statistics of any <productname>PostgreSQL</productname>
process. For example:
<programlisting>
postgres=# SELECT * FROM pg_get_process_memory_contexts(
(SELECT pid FROM pg_stat_activity
WHERE backend_type = 'checkpointer'),
false, 0.5) LIMIT 1;
-[ RECORD 1 ]----+------------------------------
name | TopMemoryContext
ident |
type | AllocSet
path | {1}
level | 1
total_bytes | 90304
total_nblocks | 3
free_bytes | 2880
free_chunks | 1
used_bytes | 87424
num_agg_contexts | 1
stats_timestamp | 2025-03-24 13:55:47.796698+01
</programlisting>
<note>
<para>
While <function>pg_get_process_memory_contexts</function> can be used to
query memory contexts of the local backend,
<structname>pg_backend_memory_contexts</structname>
(see <xref linkend="view-pg-backend-memory-contexts"/> for more details)
will be less resource intensive when only the local backend is of interest.
</para>
</note>
</para>

</sect2>

<sect2 id="functions-admin-backup">
@@ -29940,6 +29769,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
<para>
Creates a replication origin with the given external
name, and returns the internal ID assigned to it.
The name must be no longer than 512 bytes.
</para></entry>
</row>
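
A minimal sketch of the limit just documented (the origin name is a placeholder
and must not exceed 512 bytes):

SELECT pg_replication_origin_create('my_origin');
SELECT pg_replication_origin_drop('my_origin');
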
@@ -30639,7 +30469,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
arguments are passed as pairs of <replaceable>argname</replaceable>
and <replaceable>argvalue</replaceable> in the form:
<programlisting>
SELECT pg_restore_relation_stats(
SELECT pg_restore_relation_stats(
'<replaceable>arg1name</replaceable>', '<replaceable>arg1value</replaceable>'::<replaceable>arg1type</replaceable>,
'<replaceable>arg2name</replaceable>', '<replaceable>arg2value</replaceable>'::<replaceable>arg2type</replaceable>,
'<replaceable>arg3name</replaceable>', '<replaceable>arg3value</replaceable>'::<replaceable>arg3type</replaceable>);
@@ -30650,7 +30480,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
<structfield>reltuples</structfield> values for the table
<structname>mytable</structname>:
<programlisting>
SELECT pg_restore_relation_stats(
SELECT pg_restore_relation_stats(
'schemaname', 'myschema',
'relname', 'mytable',
'relpages', 173::integer,
@@ -30732,7 +30562,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
arguments are passed as pairs of <replaceable>argname</replaceable>
and <replaceable>argvalue</replaceable> in the form:
<programlisting>
SELECT pg_restore_attribute_stats(
SELECT pg_restore_attribute_stats(
'<replaceable>arg1name</replaceable>', '<replaceable>arg1value</replaceable>'::<replaceable>arg1type</replaceable>,
'<replaceable>arg2name</replaceable>', '<replaceable>arg2value</replaceable>'::<replaceable>arg2type</replaceable>,
'<replaceable>arg3name</replaceable>', '<replaceable>arg3value</replaceable>'::<replaceable>arg3type</replaceable>);
@@ -30744,7 +30574,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
<structfield>col1</structfield> of the table
<structname>mytable</structname>:
<programlisting>
SELECT pg_restore_attribute_stats(
SELECT pg_restore_attribute_stats(
'schemaname', 'myschema',
'relname', 'mytable',
'attname', 'col1',

@@ -1170,7 +1170,7 @@ my_sortsupport(PG_FUNCTION_ARGS)
</varlistentry>

<varlistentry>
<term><function>stratnum</function></term>
<term><function>translate_cmptype</function></term>
<listitem>
<para>
Given a <literal>CompareType</literal> value from
@@ -1188,12 +1188,23 @@ my_sortsupport(PG_FUNCTION_ARGS)
non-<literal>WITHOUT OVERLAPS</literal> part(s) of an index constraint.
</para>

<para>
This support function corresponds to the index access method callback
function <structfield>amtranslatecmptype</structfield> (see <xref
linkend="index-functions"/>). The
<structfield>amtranslatecmptype</structfield> callback function for
GiST indexes merely calls down to the
<function>translate_cmptype</function> support function of the
respective operator family, since the GiST index access method has no
fixed strategy numbers itself.
</para>

<para>
The <acronym>SQL</acronym> declaration of the function must look like
this:

<programlisting>
CREATE OR REPLACE FUNCTION my_stratnum(integer)
CREATE OR REPLACE FUNCTION my_translate_cmptype(integer)
RETURNS smallint
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
@@ -1202,7 +1213,7 @@ LANGUAGE C STRICT;
And the operator family registration must look like this:
<programlisting>
ALTER OPERATOR FAMILY my_opfamily USING gist ADD
FUNCTION 12 ("any", "any") my_stratnum(int);
FUNCTION 12 ("any", "any") my_translate_cmptype(int);
</programlisting>
</para>

@@ -1210,10 +1221,10 @@ ALTER OPERATOR FAMILY my_opfamily USING gist ADD
The matching code in the C module could then follow this skeleton:

<programlisting>
PG_FUNCTION_INFO_V1(my_stratnum);
PG_FUNCTION_INFO_V1(my_translate_cmptype);

Datum
my_stratnum(PG_FUNCTION_ARGS)
my_translate_cmptype(PG_FUNCTION_ARGS)
{
CompareType cmptype = PG_GETARG_INT32(0);
StrategyNumber ret = InvalidStrategy;
@@ -1232,11 +1243,11 @@ my_stratnum(PG_FUNCTION_ARGS)
<para>
One translation function is provided by
<productname>PostgreSQL</productname>:
<literal>gist_stratnum_common</literal> is for operator classes that
<literal>gist_translate_cmptype_common</literal> is for operator classes that
use the <literal>RT*StrategyNumber</literal> constants.
The <literal>btree_gist</literal>
extension defines a second translation function,
<literal>gist_stratnum_btree</literal>, for operator classes that use
<literal>gist_translate_cmptype_btree</literal>, for operator classes that use
the <literal>BT*StrategyNumber</literal> constants.
</para>
</listitem>

@@ -11,13 +11,15 @@
<title>Legal Notice</title>

<para>
<productname>PostgreSQL</productname> is Copyright © 1996–2025
by the PostgreSQL Global Development Group.
<productname>PostgreSQL</productname> Database Management System
(also known as Postgres, formerly known as Postgres95)
</para>

<para>
<productname>Postgres95</productname> is Copyright © 1994–5
by the Regents of the University of California.
Portions Copyright © 1996-2025, PostgreSQL Global Development Group
</para>
<para>
Portions Copyright © 1994, The Regents of the University of California
</para>

<para>

@@ -363,34 +363,25 @@
<para>
Create some test tables on the publisher.
<programlisting>
test_pub=# CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
CREATE TABLE
test_pub=# CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
CREATE TABLE
test_pub=# CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
CREATE TABLE
/* pub # */ CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
/* pub # */ CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
/* pub # */ CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
</programlisting></para>

<para>
Create the same tables on the subscriber.
<programlisting>
test_sub=# CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
CREATE TABLE
test_sub=# CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
CREATE TABLE
test_sub=# CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
CREATE TABLE
/* sub # */ CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
/* sub # */ CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
/* sub # */ CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
</programlisting></para>

<para>
Insert data to the tables at the publisher side.
<programlisting>
test_pub=# INSERT INTO t1 VALUES (1, 'one'), (2, 'two'), (3, 'three');
INSERT 0 3
test_pub=# INSERT INTO t2 VALUES (1, 'A'), (2, 'B'), (3, 'C');
INSERT 0 3
test_pub=# INSERT INTO t3 VALUES (1, 'i'), (2, 'ii'), (3, 'iii');
INSERT 0 3
/* pub # */ INSERT INTO t1 VALUES (1, 'one'), (2, 'two'), (3, 'three');
/* pub # */ INSERT INTO t2 VALUES (1, 'A'), (2, 'B'), (3, 'C');
/* pub # */ INSERT INTO t3 VALUES (1, 'i'), (2, 'ii'), (3, 'iii');
</programlisting></para>

<para>
@@ -399,41 +390,34 @@ INSERT 0 3
<link linkend="sql-createpublication-params-with-publish"><literal>publish</literal></link>
operations. The publication <literal>pub3b</literal> has a row filter (see
<xref linkend="logical-replication-row-filter"/>).
<programlisting>
test_pub=# CREATE PUBLICATION pub1 FOR TABLE t1;
CREATE PUBLICATION
test_pub=# CREATE PUBLICATION pub2 FOR TABLE t2 WITH (publish = 'truncate');
CREATE PUBLICATION
test_pub=# CREATE PUBLICATION pub3a FOR TABLE t3 WITH (publish = 'truncate');
CREATE PUBLICATION
test_pub=# CREATE PUBLICATION pub3b FOR TABLE t3 WHERE (e > 5);
CREATE PUBLICATION
</programlisting></para>
<programlisting><![CDATA[
/* pub # */ CREATE PUBLICATION pub1 FOR TABLE t1;
/* pub # */ CREATE PUBLICATION pub2 FOR TABLE t2 WITH (publish = 'truncate');
/* pub # */ CREATE PUBLICATION pub3a FOR TABLE t3 WITH (publish = 'truncate');
/* pub # */ CREATE PUBLICATION pub3b FOR TABLE t3 WHERE (e > 5);
]]></programlisting></para>

<para>
Create subscriptions for the publications. The subscription
<literal>sub3</literal> subscribes to both <literal>pub3a</literal> and
<literal>pub3b</literal>. All subscriptions will copy initial data by default.
<programlisting>
test_sub=# CREATE SUBSCRIPTION sub1
test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=sub1'
test_sub-# PUBLICATION pub1;
CREATE SUBSCRIPTION
test_sub=# CREATE SUBSCRIPTION sub2
test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=sub2'
test_sub-# PUBLICATION pub2;
CREATE SUBSCRIPTION
test_sub=# CREATE SUBSCRIPTION sub3
test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=sub3'
test_sub-# PUBLICATION pub3a, pub3b;
CREATE SUBSCRIPTION
/* sub # */ CREATE SUBSCRIPTION sub1
/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=sub1'
/* sub - */ PUBLICATION pub1;
/* sub # */ CREATE SUBSCRIPTION sub2
/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=sub2'
/* sub - */ PUBLICATION pub2;
/* sub # */ CREATE SUBSCRIPTION sub3
/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=sub3'
/* sub - */ PUBLICATION pub3a, pub3b;
</programlisting></para>

<para>
Observe that initial table data is copied, regardless of the
<literal>publish</literal> operation of the publication.
<programlisting>
test_sub=# SELECT * FROM t1;
/* sub # */ SELECT * FROM t1;
a | b
---+-------
1 | one
@@ -441,7 +425,7 @@ test_sub=# SELECT * FROM t1;
3 | three
(3 rows)

test_sub=# SELECT * FROM t2;
/* sub # */ SELECT * FROM t2;
c | d
---+---
1 | A
@@ -456,7 +440,7 @@ test_sub=# SELECT * FROM t2;
it means the copied table <literal>t3</literal> contains all rows even when
they do not match the row filter of publication <literal>pub3b</literal>.
<programlisting>
test_sub=# SELECT * FROM t3;
/* sub # */ SELECT * FROM t3;
e | f
---+-----
1 | i
@@ -468,18 +452,15 @@ test_sub=# SELECT * FROM t3;
<para>
Insert more data to the tables at the publisher side.
<programlisting>
test_pub=# INSERT INTO t1 VALUES (4, 'four'), (5, 'five'), (6, 'six');
INSERT 0 3
test_pub=# INSERT INTO t2 VALUES (4, 'D'), (5, 'E'), (6, 'F');
INSERT 0 3
test_pub=# INSERT INTO t3 VALUES (4, 'iv'), (5, 'v'), (6, 'vi');
INSERT 0 3
/* pub # */ INSERT INTO t1 VALUES (4, 'four'), (5, 'five'), (6, 'six');
/* pub # */ INSERT INTO t2 VALUES (4, 'D'), (5, 'E'), (6, 'F');
/* pub # */ INSERT INTO t3 VALUES (4, 'iv'), (5, 'v'), (6, 'vi');
</programlisting></para>

<para>
Now the publisher side data looks like:
<programlisting>
test_pub=# SELECT * FROM t1;
/* pub # */ SELECT * FROM t1;
a | b
---+-------
1 | one
@@ -490,7 +471,7 @@ test_pub=# SELECT * FROM t1;
6 | six
(6 rows)

test_pub=# SELECT * FROM t2;
/* pub # */ SELECT * FROM t2;
c | d
---+---
1 | A
@@ -501,7 +482,7 @@ test_pub=# SELECT * FROM t2;
6 | F
(6 rows)

test_pub=# SELECT * FROM t3;
/* pub # */ SELECT * FROM t3;
e | f
---+-----
1 | i
@@ -521,7 +502,7 @@ test_pub=# SELECT * FROM t3;
only replicate data that matches the row filter of <literal>pub3b</literal>.
Now the subscriber side data looks like:
<programlisting>
test_sub=# SELECT * FROM t1;
/* sub # */ SELECT * FROM t1;
a | b
---+-------
1 | one
@@ -532,7 +513,7 @@ test_sub=# SELECT * FROM t1;
6 | six
(6 rows)

test_sub=# SELECT * FROM t2;
/* sub # */ SELECT * FROM t2;
c | d
---+---
1 | A
@@ -540,7 +521,7 @@ test_sub=# SELECT * FROM t2;
3 | C
(3 rows)

test_sub=# SELECT * FROM t3;
/* sub # */ SELECT * FROM t3;
e | f
---+-----
1 | i
@@ -567,8 +548,7 @@ test_sub=# SELECT * FROM t3;
<para>
First, create a publication for the examples to use.
<programlisting>
test_pub=# CREATE PUBLICATION pub1 FOR ALL TABLES;
CREATE PUBLICATION
/* pub # */ CREATE PUBLICATION pub1 FOR ALL TABLES;
</programlisting></para>
<para>
Example 1: Where the subscription says <literal>connect = false</literal>
@@ -579,13 +559,12 @@ CREATE PUBLICATION
<para>
Create the subscription.
<programlisting>
test_sub=# CREATE SUBSCRIPTION sub1
test_sub-# CONNECTION 'host=localhost dbname=test_pub'
test_sub-# PUBLICATION pub1
test_sub-# WITH (connect=false);
/* sub # */ CREATE SUBSCRIPTION sub1
/* sub - */ CONNECTION 'host=localhost dbname=test_pub'
/* sub - */ PUBLICATION pub1
/* sub - */ WITH (connect=false);
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
CREATE SUBSCRIPTION
</programlisting></para>
</listitem>
<listitem>
@@ -594,7 +573,7 @@ CREATE SUBSCRIPTION
specified during <literal>CREATE SUBSCRIPTION</literal>, the name of the
slot to create is same as the subscription name, e.g. "sub1".
<programlisting>
test_pub=# SELECT * FROM pg_create_logical_replication_slot('sub1', 'pgoutput');
/* pub # */ SELECT * FROM pg_create_logical_replication_slot('sub1', 'pgoutput');
slot_name | lsn
-----------+-----------
sub1 | 0/19404D0
@@ -606,10 +585,8 @@ test_pub=# SELECT * FROM pg_create_logical_replication_slot('sub1', 'pgoutput');
On the subscriber, complete the activation of the subscription. After
this the tables of <literal>pub1</literal> will start replicating.
<programlisting>
test_sub=# ALTER SUBSCRIPTION sub1 ENABLE;
ALTER SUBSCRIPTION
test_sub=# ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* sub # */ ALTER SUBSCRIPTION sub1 ENABLE;
/* sub # */ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
</programlisting></para>
</listitem>
</itemizedlist>
@@ -625,13 +602,12 @@ ALTER SUBSCRIPTION
<para>
Create the subscription.
<programlisting>
test_sub=# CREATE SUBSCRIPTION sub1
test_sub-# CONNECTION 'host=localhost dbname=test_pub'
test_sub-# PUBLICATION pub1
test_sub-# WITH (connect=false, slot_name='myslot');
/* sub # */ CREATE SUBSCRIPTION sub1
/* sub - */ CONNECTION 'host=localhost dbname=test_pub'
/* sub - */ PUBLICATION pub1
/* sub - */ WITH (connect=false, slot_name='myslot');
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
CREATE SUBSCRIPTION
</programlisting></para>
</listitem>
<listitem>
@@ -639,7 +615,7 @@
On the publisher, manually create a slot using the same name that was
specified during <literal>CREATE SUBSCRIPTION</literal>, e.g. "myslot".
<programlisting>
test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
/* pub # */ SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
slot_name | lsn
-----------+-----------
myslot | 0/19059A0
@@ -651,10 +627,8 @@ test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput'
On the subscriber, the remaining subscription activation steps are the
same as before.
<programlisting>
test_sub=# ALTER SUBSCRIPTION sub1 ENABLE;
ALTER SUBSCRIPTION
test_sub=# ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* sub # */ ALTER SUBSCRIPTION sub1 ENABLE;
/* sub # */ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
</programlisting></para>
</listitem>
</itemizedlist>
@@ -669,18 +643,17 @@ ALTER SUBSCRIPTION
<literal>enabled = false</literal>, and
<literal>create_slot = false</literal> are also needed.
<programlisting>
test_sub=# CREATE SUBSCRIPTION sub1
test_sub-# CONNECTION 'host=localhost dbname=test_pub'
test_sub-# PUBLICATION pub1
test_sub-# WITH (slot_name=NONE, enabled=false, create_slot=false);
CREATE SUBSCRIPTION
/* sub # */ CREATE SUBSCRIPTION sub1
/* sub - */ CONNECTION 'host=localhost dbname=test_pub'
/* sub - */ PUBLICATION pub1
/* sub - */ WITH (slot_name=NONE, enabled=false, create_slot=false);
</programlisting></para>
</listitem>
<listitem>
<para>
On the publisher, manually create a slot using any name, e.g. "myslot".
<programlisting>
test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
/* pub # */ SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
slot_name | lsn
-----------+-----------
myslot | 0/1905930
@@ -692,18 +665,15 @@ test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput'
On the subscriber, associate the subscription with the slot name just
created.
<programlisting>
test_sub=# ALTER SUBSCRIPTION sub1 SET (slot_name='myslot');
ALTER SUBSCRIPTION
/* sub # */ ALTER SUBSCRIPTION sub1 SET (slot_name='myslot');
</programlisting></para>
</listitem>
<listitem>
<para>
The remaining subscription activation steps are same as before.
<programlisting>
test_sub=# ALTER SUBSCRIPTION sub1 ENABLE;
ALTER SUBSCRIPTION
test_sub=# ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* sub # */ ALTER SUBSCRIPTION sub1 ENABLE;
/* sub # */ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
</programlisting></para>
</listitem>
</itemizedlist>
@@ -752,7 +722,7 @@ ALTER SUBSCRIPTION
will return the relevant replication slots associated with the
failover-enabled subscriptions.
<programlisting>
test_sub=# SELECT
/* sub # */ SELECT
array_agg(quote_literal(s.subslotname)) AS slots
FROM pg_subscription s
WHERE s.subfailover AND
@@ -775,7 +745,7 @@ test_sub=# SELECT
as they will either be dropped or re-created on the new primary server in those
cases.
<programlisting>
test_sub=# SELECT
/* sub # */ SELECT
array_agg(quote_literal(slot_name)) AS slots
FROM
(
@@ -794,7 +764,7 @@ test_sub=# SELECT
Check that the logical replication slots identified above exist on
the standby server and are ready for failover.
<programlisting>
test_standby=# SELECT slot_name, (synced AND NOT temporary AND NOT conflicting) AS failover_ready
/* standby # */ SELECT slot_name, (synced AND NOT temporary AND NOT conflicting) AS failover_ready
FROM pg_replication_slots
WHERE slot_name IN
('sub1','sub2','sub3', 'pg_16394_sync_16385_7394666715149055164');
@@ -1024,12 +994,9 @@ test_standby=# SELECT slot_name, (synced AND NOT temporary AND NOT conflicting)
<para>
Create some tables to be used in the following examples.
<programlisting>
test_pub=# CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
CREATE TABLE
test_pub=# CREATE TABLE t2(d int, e int, f int, PRIMARY KEY(d));
CREATE TABLE
test_pub=# CREATE TABLE t3(g int, h int, i int, PRIMARY KEY(g));
CREATE TABLE
/* pub # */ CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
/* pub # */ CREATE TABLE t2(d int, e int, f int, PRIMARY KEY(d));
/* pub # */ CREATE TABLE t3(g int, h int, i int, PRIMARY KEY(g));
</programlisting></para>

<para>
@@ -1038,43 +1005,40 @@ CREATE TABLE
<literal>p2</literal> has two tables. Table <literal>t1</literal> has no row
filter, and table <literal>t2</literal> has a row filter. Publication
<literal>p3</literal> has two tables, and both of them have a row filter.
<programlisting>
test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 WHERE (a > 5 AND c = 'NSW');
CREATE PUBLICATION
test_pub=# CREATE PUBLICATION p2 FOR TABLE t1, t2 WHERE (e = 99);
CREATE PUBLICATION
test_pub=# CREATE PUBLICATION p3 FOR TABLE t2 WHERE (d = 10), t3 WHERE (g = 10);
CREATE PUBLICATION
</programlisting></para>
<programlisting><![CDATA[
/* pub # */ CREATE PUBLICATION p1 FOR TABLE t1 WHERE (a > 5 AND c = 'NSW');
/* pub # */ CREATE PUBLICATION p2 FOR TABLE t1, t2 WHERE (e = 99);
/* pub # */ CREATE PUBLICATION p3 FOR TABLE t2 WHERE (d = 10), t3 WHERE (g = 10);
]]></programlisting></para>

<para>
<command>psql</command> can be used to show the row filter expressions (if
defined) for each publication.
<programlisting>
test_pub=# \dRp+
Publication p1
<programlisting><![CDATA[
/* pub # */ \dRp+
Publication p1
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
----------+------------+---------+---------+---------+-----------+----------
postgres | f | t | t | t | t | f
Tables:
"public.t1" WHERE ((a > 5) AND (c = 'NSW'::text))
"public.t1" WHERE ((a > 5) AND (c = 'NSW'::text))

Publication p2
Publication p2
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
----------+------------+---------+---------+---------+-----------+----------
postgres | f | t | t | t | t | f
Tables:
"public.t1"
"public.t2" WHERE (e = 99)
"public.t1"
"public.t2" WHERE (e = 99)

Publication p3
Publication p3
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
----------+------------+---------+---------+---------+-----------+----------
postgres | f | t | t | t | t | f
Tables:
"public.t2" WHERE (d = 10)
"public.t3" WHERE (g = 10)
</programlisting></para>
"public.t2" WHERE (d = 10)
"public.t3" WHERE (g = 10)
]]></programlisting></para>

<para>
<command>psql</command> can be used to show the row filter expressions (if
@@ -1082,8 +1046,8 @@
of two publications, but has a row filter only in <literal>p1</literal>.
See that table <literal>t2</literal> is a member of two publications, and
has a different row filter in each of them.
<programlisting>
test_pub=# \d t1
<programlisting><![CDATA[
/* pub # */ \d t1
Table "public.t1"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
@@ -1096,7 +1060,7 @@
"p1" WHERE ((a > 5) AND (c = 'NSW'::text))
"p2"

test_pub=# \d t2
/* pub # */ \d t2
Table "public.t2"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
@@ -1109,7 +1073,7 @@
"p2" WHERE (e = 99)
"p3" WHERE (d = 10)

test_pub=# \d t3
/* pub # */ \d t3
Table "public.t3"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
@@ -1120,43 +1084,33 @@
"t3_pkey" PRIMARY KEY, btree (g)
Publications:
"p3" WHERE (g = 10)
</programlisting></para>
]]></programlisting></para>

<para>
On the subscriber node, create a table <literal>t1</literal> with the same
definition as the one on the publisher, and also create the subscription
<literal>s1</literal> that subscribes to the publication <literal>p1</literal>.
<programlisting>
test_sub=# CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
CREATE TABLE
test_sub=# CREATE SUBSCRIPTION s1
test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=s1'
test_sub-# PUBLICATION p1;
CREATE SUBSCRIPTION
/* sub # */ CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
/* sub # */ CREATE SUBSCRIPTION s1
/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=s1'
/* sub - */ PUBLICATION p1;
</programlisting></para>

<para>
Insert some rows. Only the rows satisfying the <literal>t1 WHERE</literal>
clause of publication <literal>p1</literal> are replicated.
<programlisting>
test_pub=# INSERT INTO t1 VALUES (2, 102, 'NSW');
INSERT 0 1
test_pub=# INSERT INTO t1 VALUES (3, 103, 'QLD');
INSERT 0 1
test_pub=# INSERT INTO t1 VALUES (4, 104, 'VIC');
INSERT 0 1
test_pub=# INSERT INTO t1 VALUES (5, 105, 'ACT');
INSERT 0 1
test_pub=# INSERT INTO t1 VALUES (6, 106, 'NSW');
INSERT 0 1
test_pub=# INSERT INTO t1 VALUES (7, 107, 'NT');
INSERT 0 1
test_pub=# INSERT INTO t1 VALUES (8, 108, 'QLD');
INSERT 0 1
test_pub=# INSERT INTO t1 VALUES (9, 109, 'NSW');
INSERT 0 1
/* pub # */ INSERT INTO t1 VALUES (2, 102, 'NSW');
/* pub # */ INSERT INTO t1 VALUES (3, 103, 'QLD');
/* pub # */ INSERT INTO t1 VALUES (4, 104, 'VIC');
/* pub # */ INSERT INTO t1 VALUES (5, 105, 'ACT');
/* pub # */ INSERT INTO t1 VALUES (6, 106, 'NSW');
/* pub # */ INSERT INTO t1 VALUES (7, 107, 'NT');
/* pub # */ INSERT INTO t1 VALUES (8, 108, 'QLD');
/* pub # */ INSERT INTO t1 VALUES (9, 109, 'NSW');

test_pub=# SELECT * FROM t1;
/* pub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
2 | 102 | NSW
@@ -1170,7 +1124,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
test_sub=# SELECT * FROM t1;
/* sub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
6 | 106 | NSW
@@ -1184,10 +1138,9 @@ test_sub=# SELECT * FROM t1;
<literal>p1</literal>. The <command>UPDATE</command> replicates
the change as normal.
<programlisting>
test_pub=# UPDATE t1 SET b = 999 WHERE a = 6;
UPDATE 1
/* pub # */ UPDATE t1 SET b = 999 WHERE a = 6;

test_pub=# SELECT * FROM t1;
/* pub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
2 | 102 | NSW
@@ -1201,7 +1154,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
test_sub=# SELECT * FROM t1;
/* sub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
9 | 109 | NSW
@@ -1216,10 +1169,9 @@ test_sub=# SELECT * FROM t1;
transformed into an <command>INSERT</command> and the change is replicated.
See the new row on the subscriber.
<programlisting>
test_pub=# UPDATE t1 SET a = 555 WHERE a = 2;
UPDATE 1
/* pub # */ UPDATE t1 SET a = 555 WHERE a = 2;

test_pub=# SELECT * FROM t1;
/* pub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
3 | 103 | QLD
@@ -1233,7 +1185,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
test_sub=# SELECT * FROM t1;
/* sub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
9 | 109 | NSW
@@ -1249,10 +1201,9 @@ test_sub=# SELECT * FROM t1;
transformed into a <command>DELETE</command> and the change is replicated.
See that the row is removed from the subscriber.
<programlisting>
test_pub=# UPDATE t1 SET c = 'VIC' WHERE a = 9;
UPDATE 1
/* pub # */ UPDATE t1 SET c = 'VIC' WHERE a = 9;

test_pub=# SELECT * FROM t1;
/* pub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
3 | 103 | QLD
@@ -1266,7 +1217,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
test_sub=# SELECT * FROM t1;
/* sub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
6 | 999 | NSW
@@ -1284,17 +1235,13 @@ test_sub=# SELECT * FROM t1;
<para>
Create a partitioned table on the publisher.
<programlisting>
test_pub=# CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
CREATE TABLE
test_pub=# CREATE TABLE child PARTITION OF parent DEFAULT;
CREATE TABLE
/* pub # */ CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
/* pub # */ CREATE TABLE child PARTITION OF parent DEFAULT;
</programlisting>
Create the same tables on the subscriber.
<programlisting>
test_sub=# CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
CREATE TABLE
test_sub=# CREATE TABLE child PARTITION OF parent DEFAULT;
CREATE TABLE
/* sub # */ CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
/* sub # */ CREATE TABLE child PARTITION OF parent DEFAULT;
</programlisting></para>

<para>
@@ -1302,16 +1249,14 @@
publication parameter <literal>publish_via_partition_root</literal> is set
as true. There are row filters defined on both the partitioned table
(<literal>parent</literal>), and on the partition (<literal>child</literal>).
|
||||
<programlisting><![CDATA[
|
||||
/* pub # */ CREATE PUBLICATION p4 FOR TABLE parent WHERE (a < 5), child WHERE (a >= 5)
|
||||
/* pub - */ WITH (publish_via_partition_root=true);
|
||||
]]></programlisting>
|
||||
<programlisting>
|
||||
test_pub=# CREATE PUBLICATION p4 FOR TABLE parent WHERE (a < 5), child WHERE (a >= 5)
|
||||
test_pub-# WITH (publish_via_partition_root=true);
|
||||
CREATE PUBLICATION
|
||||
</programlisting>
|
||||
<programlisting>
|
||||
test_sub=# CREATE SUBSCRIPTION s4
|
||||
test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=s4'
|
||||
test_sub-# PUBLICATION p4;
|
||||
CREATE SUBSCRIPTION
|
||||
/* sub # */ CREATE SUBSCRIPTION s4
|
||||
/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=s4'
|
||||
/* sub - */ PUBLICATION p4;
|
||||
</programlisting></para>
|
||||
|
||||
<para>
|
||||
@ -1320,12 +1265,10 @@ CREATE SUBSCRIPTION
|
||||
<literal>parent</literal> (because <literal>publish_via_partition_root</literal>
|
||||
is true).
|
||||
<programlisting>
|
||||
test_pub=# INSERT INTO parent VALUES (2), (4), (6);
|
||||
INSERT 0 3
|
||||
test_pub=# INSERT INTO child VALUES (3), (5), (7);
|
||||
INSERT 0 3
|
||||
/* pub # */ INSERT INTO parent VALUES (2), (4), (6);
|
||||
/* pub # */ INSERT INTO child VALUES (3), (5), (7);
|
||||
|
||||
test_pub=# SELECT * FROM parent ORDER BY a;
|
||||
/* pub # */ SELECT * FROM parent ORDER BY a;
|
||||
a
|
||||
---
|
||||
2
|
||||
@ -1337,7 +1280,7 @@ test_pub=# SELECT * FROM parent ORDER BY a;
|
||||
(6 rows)
|
||||
</programlisting>
|
||||
<programlisting>
|
||||
test_sub=# SELECT * FROM parent ORDER BY a;
|
||||
/* sub # */ SELECT * FROM parent ORDER BY a;
|
||||
a
|
||||
---
|
||||
2
|
||||
@ -1350,16 +1293,13 @@ test_sub=# SELECT * FROM parent ORDER BY a;
|
||||
Repeat the same test, but with a different value for <literal>publish_via_partition_root</literal>.
|
||||
The publication parameter <literal>publish_via_partition_root</literal> is
|
||||
set as false. A row filter is defined on the partition (<literal>child</literal>).
|
||||
<programlisting><![CDATA[
|
||||
/* pub # */ DROP PUBLICATION p4;
|
||||
/* pub # */ CREATE PUBLICATION p4 FOR TABLE parent, child WHERE (a >= 5)
|
||||
/* pub - */ WITH (publish_via_partition_root=false);
|
||||
]]></programlisting>
|
||||
<programlisting>
|
||||
test_pub=# DROP PUBLICATION p4;
|
||||
DROP PUBLICATION
|
||||
test_pub=# CREATE PUBLICATION p4 FOR TABLE parent, child WHERE (a >= 5)
|
||||
test_pub-# WITH (publish_via_partition_root=false);
|
||||
CREATE PUBLICATION
|
||||
</programlisting>
|
||||
<programlisting>
|
||||
test_sub=# ALTER SUBSCRIPTION s4 REFRESH PUBLICATION;
|
||||
ALTER SUBSCRIPTION
|
||||
/* sub # */ ALTER SUBSCRIPTION s4 REFRESH PUBLICATION;
|
||||
</programlisting></para>
|
||||
|
||||
<para>
|
||||
@ -1367,14 +1307,11 @@ ALTER SUBSCRIPTION
|
||||
row filter of <literal>child</literal> (because
|
||||
<literal>publish_via_partition_root</literal> is false).
|
||||
<programlisting>
|
||||
test_pub=# TRUNCATE parent;
|
||||
TRUNCATE TABLE
|
||||
test_pub=# INSERT INTO parent VALUES (2), (4), (6);
|
||||
INSERT 0 3
|
||||
test_pub=# INSERT INTO child VALUES (3), (5), (7);
|
||||
INSERT 0 3
|
||||
/* pub # */ TRUNCATE parent;
|
||||
/* pub # */ INSERT INTO parent VALUES (2), (4), (6);
|
||||
/* pub # */ INSERT INTO child VALUES (3), (5), (7);
|
||||
|
||||
test_pub=# SELECT * FROM parent ORDER BY a;
|
||||
/* pub # */ SELECT * FROM parent ORDER BY a;
|
||||
a
|
||||
---
|
||||
2
|
||||
@ -1386,7 +1323,7 @@ test_pub=# SELECT * FROM parent ORDER BY a;
|
||||
(6 rows)
|
||||
</programlisting>
|
||||
<programlisting>
|
||||
test_sub=# SELECT * FROM child ORDER BY a;
|
||||
/* sub # */ SELECT * FROM child ORDER BY a;
|
||||
a
|
||||
---
|
||||
5
|
||||
@ -1505,8 +1442,7 @@ test_sub=# SELECT * FROM child ORDER BY a;
|
||||
<para>
|
||||
Create a table <literal>t1</literal> to be used in the following example.
|
||||
<programlisting>
|
||||
test_pub=# CREATE TABLE t1(id int, a text, b text, c text, d text, e text, PRIMARY KEY(id));
|
||||
CREATE TABLE
|
||||
/* pub # */ CREATE TABLE t1(id int, a text, b text, c text, d text, e text, PRIMARY KEY(id));
|
||||
</programlisting></para>
|
||||
|
||||
<para>
|
||||
@ -1515,15 +1451,14 @@ CREATE TABLE
|
||||
replicated. Notice that the order of column names in the column list does
|
||||
not matter.
|
||||
<programlisting>
|
||||
test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 (id, b, a, d);
|
||||
CREATE PUBLICATION
|
||||
/* pub # */ CREATE PUBLICATION p1 FOR TABLE t1 (id, b, a, d);
|
||||
</programlisting></para>
|
||||
|
||||
<para>
|
||||
<literal>psql</literal> can be used to show the column lists (if defined)
|
||||
for each publication.
|
||||
<programlisting>
|
||||
test_pub=# \dRp+
|
||||
/* pub # */ \dRp+
|
||||
Publication p1
|
||||
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
|
||||
----------+------------+---------+---------+---------+-----------+----------
|
||||
@ -1536,7 +1471,7 @@ Tables:
|
||||
<literal>psql</literal> can be used to show the column lists (if defined)
|
||||
for each table.
|
||||
<programlisting>
|
||||
test_pub=# \d t1
|
||||
/* pub # */ \d t1
|
||||
Table "public.t1"
|
||||
Column | Type | Collation | Nullable | Default
|
||||
--------+---------+-----------+----------+---------
|
||||
@ -1559,24 +1494,19 @@ Publications:
|
||||
<literal>s1</literal> that subscribes to the publication
|
||||
<literal>p1</literal>.
|
||||
<programlisting>
|
||||
test_sub=# CREATE TABLE t1(id int, b text, a text, d text, PRIMARY KEY(id));
|
||||
CREATE TABLE
|
||||
test_sub=# CREATE SUBSCRIPTION s1
|
||||
test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=s1'
|
||||
test_sub-# PUBLICATION p1;
|
||||
CREATE SUBSCRIPTION
|
||||
/* sub # */ CREATE TABLE t1(id int, b text, a text, d text, PRIMARY KEY(id));
|
||||
/* sub # */ CREATE SUBSCRIPTION s1
|
||||
/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=s1'
|
||||
/* sub - */ PUBLICATION p1;
|
||||
</programlisting></para>
|
||||
|
||||
<para>
|
||||
On the publisher node, insert some rows to table <literal>t1</literal>.
|
||||
<programlisting>
|
||||
test_pub=# INSERT INTO t1 VALUES(1, 'a-1', 'b-1', 'c-1', 'd-1', 'e-1');
|
||||
INSERT 0 1
|
||||
test_pub=# INSERT INTO t1 VALUES(2, 'a-2', 'b-2', 'c-2', 'd-2', 'e-2');
|
||||
INSERT 0 1
|
||||
test_pub=# INSERT INTO t1 VALUES(3, 'a-3', 'b-3', 'c-3', 'd-3', 'e-3');
|
||||
INSERT 0 1
|
||||
test_pub=# SELECT * FROM t1 ORDER BY id;
|
||||
/* pub # */ INSERT INTO t1 VALUES(1, 'a-1', 'b-1', 'c-1', 'd-1', 'e-1');
|
||||
/* pub # */ INSERT INTO t1 VALUES(2, 'a-2', 'b-2', 'c-2', 'd-2', 'e-2');
|
||||
/* pub # */ INSERT INTO t1 VALUES(3, 'a-3', 'b-3', 'c-3', 'd-3', 'e-3');
|
||||
/* pub # */ SELECT * FROM t1 ORDER BY id;
|
||||
id | a | b | c | d | e
|
||||
----+-----+-----+-----+-----+-----
|
||||
1 | a-1 | b-1 | c-1 | d-1 | e-1
|
||||
@ -1589,7 +1519,7 @@ test_pub=# SELECT * FROM t1 ORDER BY id;
|
||||
Only data from the column list of publication <literal>p1</literal> is
|
||||
replicated.
|
||||
<programlisting>
|
||||
test_sub=# SELECT * FROM t1 ORDER BY id;
|
||||
/* sub # */ SELECT * FROM t1 ORDER BY id;
|
||||
id | b | a | d
|
||||
----+-----+-----+-----
|
||||
1 | b-1 | a-1 | d-1
|
||||
@ -1617,13 +1547,10 @@ test_sub=# SELECT * FROM t1 ORDER BY id;
|
||||
For example, note below that the subscriber table's generated column value
comes from the subscriber column's own calculation.
<programlisting>
test_pub=# CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a + 1) STORED);
CREATE TABLE
test_pub=# INSERT INTO tab_gen_to_gen VALUES (1),(2),(3);
INSERT 0 3
test_pub=# CREATE PUBLICATION pub1 FOR TABLE tab_gen_to_gen;
CREATE PUBLICATION
test_pub=# SELECT * FROM tab_gen_to_gen;
/* pub # */ CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a + 1) STORED);
/* pub # */ INSERT INTO tab_gen_to_gen VALUES (1),(2),(3);
/* pub # */ CREATE PUBLICATION pub1 FOR TABLE tab_gen_to_gen;
/* pub # */ SELECT * FROM tab_gen_to_gen;
a | b
---+---
1 | 2
@@ -1631,11 +1558,9 @@ test_pub=# SELECT * FROM tab_gen_to_gen;
3 | 4
(3 rows)

test_sub=# CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a * 100) STORED);
CREATE TABLE
test_sub=# CREATE SUBSCRIPTION sub1 CONNECTION 'dbname=test_pub' PUBLICATION pub1;
CREATE SUBSCRIPTION
test_sub=# SELECT * from tab_gen_to_gen;
/* sub # */ CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a * 100) STORED);
/* sub # */ CREATE SUBSCRIPTION sub1 CONNECTION 'dbname=test_pub' PUBLICATION pub1;
/* sub # */ SELECT * from tab_gen_to_gen;
a |  b
---+----
1 | 100
@@ -2690,8 +2615,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
</programlisting>
</para>
</step>
@@ -2780,8 +2704,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile
<xref linkend="two-node-cluster-disable-subscriptions-node2"/>
and now, e.g.:
<programlisting>
node2=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
CREATE TABLE
/* node2 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -2793,8 +2716,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
</programlisting>
</para>
</step>
@@ -2805,8 +2727,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -2844,8 +2765,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
</programlisting>
</para>
</step>
@@ -2896,8 +2816,7 @@ pg_ctl -D /opt/PostgreSQL/data1_upgraded start -l logfile
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
node3=# ALTER SUBSCRIPTION sub1_node2_node3 DISABLE;
ALTER SUBSCRIPTION
/* node3 # */ ALTER SUBSCRIPTION sub1_node2_node3 DISABLE;
</programlisting>
</para>
</step>
@@ -2948,8 +2867,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile
<xref linkend="cascaded-cluster-disable-sub-node1-node2"/>
and now, e.g.:
<programlisting>
node2=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
CREATE TABLE
/* node2 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -2961,8 +2879,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
</programlisting>
</para>
</step>
@@ -2973,8 +2890,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -3025,8 +2941,7 @@ pg_ctl -D /opt/PostgreSQL/data3_upgraded start -l logfile
<xref linkend="cascaded-cluster-disable-sub-node2-node3"/> and now,
e.g.:
<programlisting>
node3=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
CREATE TABLE
/* node3 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -3038,8 +2953,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
node3=# ALTER SUBSCRIPTION sub1_node2_node3 ENABLE;
ALTER SUBSCRIPTION
/* node3 # */ ALTER SUBSCRIPTION sub1_node2_node3 ENABLE;
</programlisting>
</para>
</step>
@@ -3050,8 +2964,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
node3=# ALTER SUBSCRIPTION sub1_node2_node3 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* node3 # */ ALTER SUBSCRIPTION sub1_node2_node3 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -3082,8 +2995,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
</programlisting>
</para>
</step>
@@ -3134,8 +3046,7 @@ pg_ctl -D /opt/PostgreSQL/data1_upgraded start -l logfile
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
</programlisting>
</para>
</step>
@@ -3146,8 +3057,7 @@ ALTER SUBSCRIPTION
<literal>node2</literal> between <xref linkend="circular-cluster-disable-sub-node2"/>
and now, e.g.:
<programlisting>
node1=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
CREATE TABLE
/* node1 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -3160,8 +3070,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
node1=# ALTER SUBSCRIPTION sub1_node2_node1 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* node1 # */ ALTER SUBSCRIPTION sub1_node2_node1 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -3173,8 +3082,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
node1=# ALTER SUBSCRIPTION sub1_node2_node1 DISABLE;
ALTER SUBSCRIPTION
/* node1 # */ ALTER SUBSCRIPTION sub1_node2_node1 DISABLE;
</programlisting>
</para>
</step>
@@ -3225,8 +3133,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
node1=# ALTER SUBSCRIPTION sub1_node2_node1 ENABLE;
ALTER SUBSCRIPTION
/* node1 # */ ALTER SUBSCRIPTION sub1_node2_node1 ENABLE;
</programlisting>
</para>
</step>
@@ -3237,8 +3144,7 @@ ALTER SUBSCRIPTION
the upgraded <literal>node1</literal> between <xref linkend="circular-cluster-disable-sub-node1"/>
and now, e.g.:
<programlisting>
node2=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
CREATE TABLE
/* node2 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -3250,8 +3156,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
node2=# ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
ALTER SUBSCRIPTION
/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
</programlisting>
</para>
</step>

@@ -455,9 +455,8 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
using the slot's contents without losing any changes.
</para>
<para>
Creation of a snapshot is not always possible. In particular, it will
fail when connected to a hot standby. Applications that do not require
snapshot export may suppress it with the <literal>NOEXPORT_SNAPSHOT</literal>
Applications that do not require
snapshot export may suppress it with the <literal>SNAPSHOT 'nothing'</literal>
option.
</para>
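<para>
As an illustrative sketch (the slot name, database, and output plugin are
placeholders), such a slot could be created over a replication connection
without exporting a snapshot:
<programlisting>
psql "dbname=postgres replication=database" \
    -c "CREATE_REPLICATION_SLOT myslot LOGICAL test_decoding (SNAPSHOT 'nothing');"
</programlisting></para>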
</sect2>

@@ -220,7 +220,12 @@
was not supported by the server.
</para>

<table>
<para>
<xref linkend="protocol-versions-table"/> shows the currently supported
protocol versions.
</para>

<table id="protocol-versions-table">
<title>Protocol versions</title>

<tgroup cols="3">

@@ -81,6 +81,35 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>-k</option></term>
<term><option>--link</option></term>
<listitem>
<para>
Use hard links instead of copying files to the synthetic backup.
Reconstruction of the synthetic backup might be faster (no file copying)
and use less disk space, but care must be taken when using the output
directory, because any modifications to that directory (for example,
starting the server) can also affect the input directories. Likewise,
changes to the input directories (for example, starting the server on
the full backup) could affect the output directory. Thus, this option
is best used when the input directories are only copies that will be
removed after <application>pg_combinebackup</application> has completed.
</para>

<para>
Requires that the input backups and the output directory are in the
same file system.
</para>

<para>
If a backup manifest is not available or does not contain a checksum of
the right type, hard links will still be created, but the file will also
be read block-by-block for the checksum calculation.
</para>
</listitem>
</varlistentry>
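<para>
For example, a synthetic backup might be reconstructed with hard links like
this (a sketch only; the backup paths are placeholders):
<programlisting>
# combine a full backup and an incremental into a new directory,
# hard-linking unchanged files instead of copying them
pg_combinebackup --link -o /backups/synthetic /backups/full /backups/incr1
</programlisting></para>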

<varlistentry>
<term><option>-n</option></term>
<term><option>--dry-run</option></term>
@@ -137,35 +166,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>-k</option></term>
<term><option>--link</option></term>
<listitem>
<para>
Use hard links instead of copying files to the synthetic backup.
Reconstruction of the synthetic backup might be faster (no file copying)
and use less disk space, but care must be taken when using the output
directory, because any modifications to that directory (for example,
starting the server) can also affect the input directories. Likewise,
changes to the input directories (for example, starting the server on
the full backup) could affect the output directory. Thus, this option
is best used when the input directories are only copies that will be
removed after <application>pg_combinebackup</application> has completed.
</para>

<para>
Requires that the input backups and the output directory are in the
same file system.
</para>

<para>
If a backup manifest is not available or does not contain a checksum of
the right type, hard links will still be created, but the file will also
be read block-by-block for the checksum calculation.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--clone</option></term>
<listitem>

@@ -1134,7 +1134,7 @@ PostgreSQL documentation
<term><option>--no-statistics</option></term>
<listitem>
<para>
Do not dump statistics.
Do not dump statistics. This is the default.
</para>
</listitem>
</varlistentry>
@@ -1461,7 +1461,7 @@ PostgreSQL documentation
<term><option>--with-statistics</option></term>
<listitem>
<para>
Dump statistics. This is the default.
Dump statistics.
</para>
</listitem>
</varlistentry>
@@ -1681,14 +1681,14 @@ CREATE DATABASE foo WITH TEMPLATE template0;
</para>

<para>
By default, <command>pg_dump</command> will include most optimizer
statistics in the resulting dump file. However, some statistics may not be
included, such as those created explicitly with <xref
linkend="sql-createstatistics"/> or custom statistics added by an
extension. Therefore, it may be useful to run <command>ANALYZE</command>
after restoring from a dump file to ensure optimal performance; see <xref
linkend="vacuum-for-statistics"/> and <xref linkend="autovacuum"/> for more
information.
If <option>--with-statistics</option> is specified,
<command>pg_dump</command> will include most optimizer statistics in the
resulting dump file. However, some statistics may not be included, such as
those created explicitly with <xref linkend="sql-createstatistics"/> or
custom statistics added by an extension. Therefore, it may be useful to
run <command>ANALYZE</command> after restoring from a dump file to ensure
optimal performance; see <xref linkend="vacuum-for-statistics"/> and <xref
linkend="autovacuum"/> for more information.
</para>
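<para>
For example (the database names are placeholders), statistics can be
requested explicitly, and the restored database analyzed afterwards to
cover anything the dump omitted:
<programlisting>
pg_dump --with-statistics mydb > mydb.sql
psql -d mydb_restored -c 'ANALYZE;'
</programlisting></para>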

<para>

@@ -567,7 +567,7 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
<term><option>--no-statistics</option></term>
<listitem>
<para>
Do not dump statistics.
Do not dump statistics. This is the default.
</para>
</listitem>
</varlistentry>
@@ -695,6 +695,17 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--sequence-data</option></term>
<listitem>
<para>
Include sequence data in the dump. This is the default behavior except
when <option>--no-data</option>, <option>--schema-only</option>, or
<option>--statistics-only</option> is specified.
</para>
</listitem>
</varlistentry>
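<para>
For instance (a sketch; the output file name is arbitrary), a schema-only
dump can still carry the current sequence values:
<programlisting>
pg_dumpall --schema-only --sequence-data > schema_and_sequences.sql
</programlisting></para>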

<varlistentry>
<term><option>--use-set-session-authorization</option></term>
<listitem>
@@ -730,7 +741,7 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
<term><option>--with-statistics</option></term>
<listitem>
<para>
Dump statistics. This is the default.
Dump statistics.
</para>
</listitem>
</varlistentry>
@@ -946,14 +957,14 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
</para>

<para>
By default, <command>pg_dumpall</command> will include most optimizer
statistics in the resulting dump file. However, some statistics may not be
included, such as those created explicitly with <xref
linkend="sql-createstatistics"/> or custom statistics added by an
extension. Therefore, it may be useful to run <command>ANALYZE</command>
on each database after restoring from a dump file to ensure optimal
performance. You can also run <command>vacuumdb -a -z</command> to analyze
all databases.
If <option>--with-statistics</option> is specified,
<command>pg_dumpall</command> will include most optimizer statistics in the
resulting dump file. However, some statistics may not be included, such as
those created explicitly with <xref linkend="sql-createstatistics"/> or
custom statistics added by an extension. Therefore, it may be useful to
run <command>ANALYZE</command> on each database after restoring from a dump
file to ensure optimal performance. You can also run <command>vacuumdb -a
-z</command> to analyze all databases.
</para>
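<para>
A possible workflow (file name is a placeholder) is to dump with statistics
and then analyze every database once the dump has been restored on the
target cluster:
<programlisting>
pg_dumpall --with-statistics > all.sql
# after restoring all.sql on the target cluster:
vacuumdb -a -z
</programlisting></para>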

<para>

@@ -171,25 +171,6 @@ PostgreSQL documentation
</para>

<variablelist>
<varlistentry>
<term><option>--char-signedness=<replaceable class="parameter">option</replaceable></option></term>
<listitem>
<para>
Manually set the default char signedness. Possible values are
<literal>signed</literal> and <literal>unsigned</literal>.
</para>
<para>
For a database cluster that <command>pg_upgrade</command> upgraded from
a <productname>PostgreSQL</productname> version before 18, the safe
value would be the default <type>char</type> signedness of the platform
that ran the cluster before that upgrade. For all other
clusters, <literal>signed</literal> would be the safe value. However,
this option is exclusively for use with <command>pg_upgrade</command>
and should not normally be used manually.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-c <replaceable class="parameter">xid</replaceable>,<replaceable class="parameter">xid</replaceable></option></term>
<term><option>--commit-timestamp-ids=<replaceable class="parameter">xid</replaceable>,<replaceable class="parameter">xid</replaceable></option></term>
@@ -332,34 +313,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--wal-segsize=<replaceable class="parameter">wal_segment_size</replaceable></option></term>
<listitem>
<para>
Set the new WAL segment size, in megabytes. The value must be set to a
power of 2 between 1 and 1024 (megabytes). See the same option of <xref
linkend="app-initdb"/> for more information.
</para>

<para>
This option can also be used to change the WAL segment size of an
existing database cluster, avoiding the need to
re-<command>initdb</command>.
</para>

<note>
<para>
While <command>pg_resetwal</command> will set the WAL starting address
beyond the latest existing WAL segment file, some segment size changes
can cause previous WAL file names to be reused. It is recommended to
use <option>-l</option> together with this option to manually set the
WAL starting address if WAL file name overlap will cause problems with
your archiving strategy.
</para>
</note>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-u <replaceable class="parameter">xid</replaceable></option></term>
<term><option>--oldest-transaction-id=<replaceable class="parameter">xid</replaceable></option></term>
@@ -402,6 +355,53 @@ PostgreSQL documentation
<!-- 1048576 = SLRU_PAGES_PER_SEGMENT * BLCKSZ * CLOG_XACTS_PER_BYTE -->
</listitem>
</varlistentry>

<varlistentry>
<term><option>--char-signedness=<replaceable class="parameter">option</replaceable></option></term>
<listitem>
<para>
Manually set the default char signedness. Possible values are
<literal>signed</literal> and <literal>unsigned</literal>.
</para>
<para>
For a database cluster that <command>pg_upgrade</command> upgraded from
a <productname>PostgreSQL</productname> version before 18, the safe
value would be the default <type>char</type> signedness of the platform
that ran the cluster before that upgrade. For all other
clusters, <literal>signed</literal> would be the safe value. However,
this option is exclusively for use with <command>pg_upgrade</command>
and should not normally be used manually.
</para>
</listitem>
</varlistentry>
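<para>
For illustration only, since <command>pg_upgrade</command> normally issues
this itself (the data directory is a placeholder):
<programlisting>
pg_resetwal --char-signedness=unsigned $PGDATA
</programlisting></para>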

<varlistentry>
<term><option>--wal-segsize=<replaceable class="parameter">wal_segment_size</replaceable></option></term>
<listitem>
<para>
Set the new WAL segment size, in megabytes. The value must be set to a
power of 2 between 1 and 1024 (megabytes). See the same option of <xref
linkend="app-initdb"/> for more information.
</para>

<para>
This option can also be used to change the WAL segment size of an
existing database cluster, avoiding the need to
re-<command>initdb</command>.
</para>

<note>
<para>
While <command>pg_resetwal</command> will set the WAL starting address
beyond the latest existing WAL segment file, some segment size changes
can cause previous WAL file names to be reused. It is recommended to
use <option>-l</option> together with this option to manually set the
WAL starting address if WAL file name overlap will cause problems with
your archiving strategy.
</para>
</note>
</listitem>
</varlistentry>
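<para>
A sketch of such an invocation (the data directory and the manually chosen
starting WAL file name are placeholders):
<programlisting>
# switch an existing cluster to 64MB WAL segments and push the WAL
# starting address past previously archived file names
pg_resetwal --wal-segsize=64 -l 000000010000000000000040 $PGDATA
</programlisting></para>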
</variablelist>
</refsect1>

@@ -177,28 +177,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--exclude-database=<replaceable class="parameter">pattern</replaceable></option></term>
<listitem>
<para>
Do not restore databases whose name matches
<replaceable class="parameter">pattern</replaceable>.
Multiple patterns can be excluded by writing multiple
<option>--exclude-database</option> switches. The
<replaceable class="parameter">pattern</replaceable> parameter is
interpreted as a pattern according to the same rules used by
<application>psql</application>'s <literal>\d</literal>
commands (see <xref linkend="app-psql-patterns"/>),
so multiple databases can also be excluded by writing wildcard
characters in the pattern. When using wildcards, be careful to
quote the pattern if needed to prevent shell wildcard expansion.
</para>
<para>
This option is only relevant when restoring from an archive made using <application>pg_dumpall</application>.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-e</option></term>
<term><option>--exit-on-error</option></term>
@@ -223,86 +201,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--filter=<replaceable class="parameter">filename</replaceable></option></term>
<listitem>
<para>
Specify a filename from which to read patterns for objects excluded
or included from restore. The patterns are interpreted according to the
same rules as
<option>-n</option>/<option>--schema</option> for including objects in schemas,
<option>-N</option>/<option>--exclude-schema</option> for excluding objects in schemas,
<option>-P</option>/<option>--function</option> for restoring named functions,
<option>-I</option>/<option>--index</option> for restoring named indexes,
<option>-t</option>/<option>--table</option> for restoring named tables
or <option>-T</option>/<option>--trigger</option> for restoring triggers.
To read from <literal>STDIN</literal>, use <filename>-</filename> as the
filename. The <option>--filter</option> option can be specified in
conjunction with the above listed options for including or excluding
objects, and can also be specified more than once for multiple filter
files.
</para>

<para>
The file lists one database pattern per row, with the following format:
<synopsis>
{ include | exclude } { function | index | schema | table | trigger } <replaceable class="parameter">PATTERN</replaceable>
</synopsis>
</para>

<para>
The first keyword specifies whether the objects matched by the pattern
are to be included or excluded. The second keyword specifies the type
of object to be filtered using the pattern:
<itemizedlist>
<listitem>
<para>
<literal>function</literal>: functions, works like the
<option>-P</option>/<option>--function</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
<listitem>
<para>
<literal>index</literal>: indexes, works like the
<option>-I</option>/<option>--indexes</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
<listitem>
<para>
<literal>schema</literal>: schemas, works like the
<option>-n</option>/<option>--schema</option> and
<option>-N</option>/<option>--exclude-schema</option> options.
</para>
</listitem>
<listitem>
<para>
<literal>table</literal>: tables, works like the
<option>-t</option>/<option>--table</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
<listitem>
<para>
<literal>trigger</literal>: triggers, works like the
<option>-T</option>/<option>--trigger</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
</itemizedlist>
</para>

<para>
Lines starting with <literal>#</literal> are considered comments and
ignored. Comments can be placed after an object pattern row as well.
Blank lines are also ignored. See <xref linkend="app-psql-patterns"/>
for how to perform quoting in patterns.
</para>

</listitem>
</varlistentry>

<varlistentry>
<term><option>-F <replaceable class="parameter">format</replaceable></option></term>
<term><option>--format=<replaceable class="parameter">format</replaceable></option></term>
@@ -646,15 +544,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--statistics-only</option></term>
<listitem>
<para>
Restore only the statistics, not schema (data definitions) or data.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-1</option></term>
<term><option>--single-transaction</option></term>
@@ -714,6 +603,108 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--exclude-database=<replaceable class="parameter">pattern</replaceable></option></term>
<listitem>
<para>
Do not restore databases whose name matches
<replaceable class="parameter">pattern</replaceable>.
Multiple patterns can be excluded by writing multiple
<option>--exclude-database</option> switches. The
<replaceable class="parameter">pattern</replaceable> parameter is
interpreted as a pattern according to the same rules used by
<application>psql</application>'s <literal>\d</literal>
commands (see <xref linkend="app-psql-patterns"/>),
so multiple databases can also be excluded by writing wildcard
characters in the pattern. When using wildcards, be careful to
quote the pattern if needed to prevent shell wildcard expansion.
</para>
<para>
This option is only relevant when restoring from an archive made using <application>pg_dumpall</application>.
</para>
</listitem>
</varlistentry>
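<para>
For example (assuming an archive produced by
<application>pg_dumpall</application>; the pattern and path are
placeholders):
<programlisting>
pg_restore -d postgres --exclude-database='test_*' /backups/all.dump
</programlisting></para>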

<varlistentry>
<term><option>--filter=<replaceable class="parameter">filename</replaceable></option></term>
<listitem>
<para>
Specify a filename from which to read patterns for objects excluded
or included from restore. The patterns are interpreted according to the
same rules as
<option>-n</option>/<option>--schema</option> for including objects in schemas,
<option>-N</option>/<option>--exclude-schema</option> for excluding objects in schemas,
<option>-P</option>/<option>--function</option> for restoring named functions,
<option>-I</option>/<option>--index</option> for restoring named indexes,
<option>-t</option>/<option>--table</option> for restoring named tables
or <option>-T</option>/<option>--trigger</option> for restoring triggers.
To read from <literal>STDIN</literal>, use <filename>-</filename> as the
filename. The <option>--filter</option> option can be specified in
conjunction with the above listed options for including or excluding
objects, and can also be specified more than once for multiple filter
files.
</para>

<para>
The file lists one database pattern per row, with the following format:
<synopsis>
{ include | exclude } { function | index | schema | table | trigger } <replaceable class="parameter">PATTERN</replaceable>
</synopsis>
</para>

<para>
The first keyword specifies whether the objects matched by the pattern
are to be included or excluded. The second keyword specifies the type
of object to be filtered using the pattern:
<itemizedlist>
<listitem>
<para>
<literal>function</literal>: functions, works like the
<option>-P</option>/<option>--function</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
<listitem>
<para>
<literal>index</literal>: indexes, works like the
<option>-I</option>/<option>--indexes</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
<listitem>
<para>
<literal>schema</literal>: schemas, works like the
<option>-n</option>/<option>--schema</option> and
<option>-N</option>/<option>--exclude-schema</option> options.
</para>
</listitem>
<listitem>
<para>
<literal>table</literal>: tables, works like the
<option>-t</option>/<option>--table</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
<listitem>
<para>
<literal>trigger</literal>: triggers, works like the
<option>-T</option>/<option>--trigger</option> option. This keyword
can only be used with the <literal>include</literal> keyword.
</para>
</listitem>
</itemizedlist>
</para>

<para>
Lines starting with <literal>#</literal> are considered comments and
ignored. Comments can be placed after an object pattern row as well.
Blank lines are also ignored. See <xref linkend="app-psql-patterns"/>
for how to perform quoting in patterns.
</para>

</listitem>
</varlistentry>
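<para>
A hypothetical filter file might look like this (the object names are
placeholders; note that <literal>exclude</literal> is only accepted for
schemas here):
<programlisting>
# restore.filter
include table public.orders
include function public.refresh_totals
exclude schema legacy
</programlisting>
It would then be passed as <literal>--filter=restore.filter</literal> on the
<application>pg_restore</application> command line.</para>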

<varlistentry>
<term><option>--if-exists</option></term>
<listitem>
@@ -851,33 +842,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--with-data</option></term>
<listitem>
<para>
Dump data. This is the default.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--with-schema</option></term>
<listitem>
<para>
Dump schema (data definitions). This is the default.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--with-statistics</option></term>
<listitem>
<para>
Dump statistics. This is the default.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--section=<replaceable class="parameter">sectionname</replaceable></option></term>
<listitem>
@@ -897,6 +861,15 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--statistics-only</option></term>
<listitem>
<para>
Restore only the statistics, not schema (data definitions) or data.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--strict-names</option></term>
<listitem>
@@ -946,6 +919,36 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--with-data</option></term>
<listitem>
<para>
Output commands to restore data, if the archive contains them.
This is the default.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--with-schema</option></term>
<listitem>
<para>
Output commands to restore schema (data definitions), if the archive
contains them. This is the default.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--with-statistics</option></term>
<listitem>
<para>
Output commands to restore statistics, if the archive contains them.
This is the default.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-?</option></term>
<term><option>--help</option></term>

@@ -143,35 +143,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>-i <replaceable class="parameter">path</replaceable></option></term>
<term><option>--ignore=<replaceable class="parameter">path</replaceable></option></term>
<listitem>
<para>
Ignore the specified file or directory, which should be expressed
as a relative path name, when comparing the list of data files
actually present in the backup to those listed in the
<literal>backup_manifest</literal> file. If a directory is
specified, this option affects the entire subtree rooted at that
location. Complaints about extra files, missing files, file size
differences, or checksum mismatches will be suppressed if the
relative path name matches the specified path name. This option
can be specified multiple times.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-m <replaceable class="parameter">path</replaceable></option></term>
<term><option>--manifest-path=<replaceable class="parameter">path</replaceable></option></term>
<listitem>
<para>
Use the manifest file at the specified path, rather than one located
in the root of the backup directory.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-F <replaceable class="parameter">format</replaceable></option></term>
<term><option>--format=<replaceable class="parameter">format</replaceable></option></term>
@@ -211,6 +182,35 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>-i <replaceable class="parameter">path</replaceable></option></term>
<term><option>--ignore=<replaceable class="parameter">path</replaceable></option></term>
<listitem>
<para>
Ignore the specified file or directory, which should be expressed
as a relative path name, when comparing the list of data files
actually present in the backup to those listed in the
<literal>backup_manifest</literal> file. If a directory is
specified, this option affects the entire subtree rooted at that
location. Complaints about extra files, missing files, file size
differences, or checksum mismatches will be suppressed if the
relative path name matches the specified path name. This option
can be specified multiple times.
</para>
</listitem>
</varlistentry>
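<para>
For example (the backup path and the ignored relative names are
placeholders):
<programlisting>
pg_verifybackup --ignore=scratch --ignore=notes.txt /backups/full
</programlisting></para>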

<varlistentry>
<term><option>-m <replaceable class="parameter">path</replaceable></option></term>
<term><option>--manifest-path=<replaceable class="parameter">path</replaceable></option></term>
<listitem>
<para>
Use the manifest file at the specified path, rather than one located
in the root of the backup directory.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-n</option></term>
<term><option>--no-parse-wal</option></term>

@@ -145,15 +145,6 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--no-statistics</option></term>
<listitem>
<para>
Do not restore statistics from the old cluster into the new cluster.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>-o</option> <replaceable class="parameter">options</replaceable></term>
<term><option>--old-options</option> <replaceable class="parameter">options</replaceable></term>
@@ -264,50 +255,10 @@ PostgreSQL documentation
</varlistentry>

<varlistentry>
<term><option>--swap</option></term>
<term><option>--no-statistics</option></term>
<listitem>
<para>
Move the data directories from the old cluster to the new cluster.
Then, replace the catalog files with those generated for the new
cluster. This mode can outperform <option>--link</option>,
<option>--clone</option>, <option>--copy</option>, and
<option>--copy-file-range</option>, especially on clusters with many
relations.
</para>
<para>
However, this mode creates many garbage files in the old cluster, which
can prolong the file synchronization step if
<option>--sync-method=syncfs</option> is used. Therefore, it is
recommended to use <option>--sync-method=fsync</option> with
<option>--swap</option>.
</para>
<para>
Additionally, once the file transfer step begins, the old cluster will
be destructively modified and therefore will no longer be safe to
start. See <xref linkend="pgupgrade-step-revert"/> for details.
</para>
</listitem>
</varlistentry>

<varlistentry>
<term><option>--sync-method=</option><replaceable>method</replaceable></term>
<listitem>
<para>
When set to <literal>fsync</literal>, which is the default,
<command>pg_upgrade</command> will recursively open and synchronize all
files in the upgraded cluster's data directory. The search for files
will follow symbolic links for the WAL directory and each configured
tablespace.
</para>
<para>
On Linux, <literal>syncfs</literal> may be used instead to ask the
operating system to synchronize the whole file systems that contain the
upgraded cluster's data directory, its WAL files, and each tablespace.
See <xref linkend="guc-recovery-init-sync-method"/> for information
about the caveats to be aware of when using <literal>syncfs</literal>.
</para>
<para>
This option has no effect when <option>--no-sync</option> is used.
Do not restore statistics from the old cluster into the new cluster.
</para>
</listitem>
</varlistentry>
@@ -365,6 +316,55 @@ PostgreSQL documentation
</listitem>
</varlistentry>

<varlistentry>
<term><option>--swap</option></term>
<listitem>
<para>
Move the data directories from the old cluster to the new cluster.
Then, replace the catalog files with those generated for the new
cluster. This mode can outperform <option>--link</option>,
<option>--clone</option>, <option>--copy</option>, and
<option>--copy-file-range</option>, especially on clusters with many
relations.
</para>
<para>
However, this mode creates many garbage files in the old cluster, which
can prolong the file synchronization step if
<option>--sync-method=syncfs</option> is used. Therefore, it is
recommended to use <option>--sync-method=fsync</option> with
<option>--swap</option>.
</para>
<para>
Additionally, once the file transfer step begins, the old cluster will
be destructively modified and therefore will no longer be safe to
start. See <xref linkend="pgupgrade-step-revert"/> for details.
</para>
</listitem>
</varlistentry>
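<para>
A sketch of an upgrade using this mode (all paths are placeholders):
<programlisting>
pg_upgrade --old-bindir /usr/lib/postgresql/17/bin \
           --new-bindir /usr/lib/postgresql/18/bin \
           --old-datadir /var/lib/postgresql/17/main \
           --new-datadir /var/lib/postgresql/18/main \
           --swap --sync-method=fsync
</programlisting></para>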
|
||||
|
||||
<varlistentry>
|
||||
<term><option>--sync-method=</option><replaceable>method</replaceable></term>
|
||||
<listitem>
|
||||
<para>
|
||||
When set to <literal>fsync</literal>, which is the default,
|
||||
<command>pg_upgrade</command> will recursively open and synchronize all
|
||||
files in the upgraded cluster's data directory. The search for files
|
||||
will follow symbolic links for the WAL directory and each configured
|
||||
tablespace.
|
||||
</para>
|
||||
<para>
|
||||
On Linux, <literal>syncfs</literal> may be used instead to ask the
|
||||
operating system to synchronize the whole file systems that contain the
|
||||
upgraded cluster's data directory, its WAL files, and each tablespace.
|
||||
See <xref linkend="guc-recovery-init-sync-method"/> for information
|
||||
about the caveats to be aware of when using <literal>syncfs</literal>.
|
||||
</para>
|
||||
<para>
|
||||
This option has no effect when <option>--no-sync</option> is used.
|
||||
</para>
|
||||
</listitem>
|
||||
</varlistentry>
|
||||
|
||||
<varlistentry>
|
||||
<term><option>-?</option></term>
|
||||
<term><option>--help</option></term>
|
||||
|
@@ -1067,16 +1067,6 @@ INSERT INTO tbls1 VALUES ($1, $2) \parse stmt1
</listitem>
</varlistentry>

-<varlistentry id="app-psql-meta-command-conninfo">
-<term><literal>\conninfo</literal></term>
-<listitem>
-<para>
-Outputs information about the current database connection,
-including TLS-related information if TLS is in use.
-</para>
-</listitem>
-</varlistentry>
-
<varlistentry id="app-psql-meta-command-close">
<term><literal>\close</literal> <replaceable class="parameter">prepared_statement_name</replaceable></term>

@@ -1106,6 +1096,16 @@ SELECT $1 \parse stmt1
</listitem>
</varlistentry>

+<varlistentry id="app-psql-meta-command-conninfo">
+<term><literal>\conninfo</literal></term>
+<listitem>
+<para>
+Outputs information about the current database connection,
+including TLS-related information if TLS is in use.
+</para>
+</listitem>
+</varlistentry>
+
<varlistentry id="app-psql-meta-commands-copy">
<term><literal>\copy { <replaceable class="parameter">table</replaceable> [ ( <replaceable class="parameter">column_list</replaceable> ) ] }
<literal>from</literal>
@@ -465,14 +465,17 @@
</programlisting>

If the index marked <literal>INVALID</literal> is suffixed
-<literal>ccnew</literal>, then it corresponds to the transient
+<literal>_ccnew</literal>, then it corresponds to the transient
index created during the concurrent operation, and the recommended
recovery method is to drop it using <literal>DROP INDEX</literal>,
then attempt <command>REINDEX CONCURRENTLY</command> again.
-If the invalid index is instead suffixed <literal>ccold</literal>,
+If the invalid index is instead suffixed <literal>_ccold</literal>,
it corresponds to the original index which could not be dropped;
the recommended recovery method is to just drop said index, since the
rebuild proper has been successful.
+A nonzero number may be appended to the suffix of the invalid index
+names to keep them unique, like <literal>_ccnew1</literal>,
+<literal>_ccold2</literal>, etc.
</para>
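
As an illustrative recovery sequence matching the suffixes described above (the index names are hypothetical):

    -- a failed REINDEX CONCURRENTLY left an invalid transient index behind
    DROP INDEX idx_ccnew;              -- drop the leftover _ccnew index
    REINDEX INDEX CONCURRENTLY idx;    -- then retry the rebuild
    -- if the invalid index is instead suffixed _ccold, dropping it suffices,
    -- because the rebuild proper already succeeded
    DROP INDEX idx_ccold;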

<para>
@@ -84,6 +84,10 @@ SECURITY LABEL [ FOR <replaceable class="parameter">provider</replaceable> ] ON
based on object labels, rather than traditional discretionary access control
(DAC) concepts such as users and groups.
</para>

+<para>
+You must own the database object to use <command>SECURITY LABEL</command>.
+</para>
</refsect1>
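
A minimal sketch of the ownership rule, using the SELinux provider and a hypothetical table owned by the current role:

    -- fails unless the current role owns mytable
    SECURITY LABEL FOR selinux ON TABLE mytable
        IS 'system_u:object_r:sepgsql_table_t:s0';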

<refsect1>
@@ -6,7 +6,7 @@

<formalpara>
<title>Release date:</title>
-<para>2025-??-??, CURRENT AS OF 2025-05-01</para>
+<para>2025-??-??, CURRENT AS OF 2025-05-23</para>
</formalpara>

<sect2 id="release-18-highlights">
@@ -53,6 +53,24 @@

<itemizedlist>

+<!--
+Author: Peter Eisentraut <peter@eisentraut.org>
+2024-10-16 [04bec894a04] initdb: Change default to using data checksums.
+-->
+
+<listitem>
+<para>
+Change initdb default to enable data checksums
+<ulink url="&commit_baseurl;04bec894a04">§</ulink>
+</para>
+
+<para>
+Checksums can be disabled with the new initdb option --no-data-checksums.
+pg_upgrade requires matching cluster checksum settings, so this new
+option can be useful to upgrade non-checksum old clusters.
+</para>
+</listitem>
+
<!--
Author: Tom Lane <tgl@sss.pgh.pa.us>
2025-01-16 [d7674c9fa] Seek zone abbreviations in the IANA data before timezone
@@ -82,7 +100,8 @@ Deprecate MD5 password authentication (Nathan Bossart)
</para>

<para>
-Warnings generated by their use can be disabled by the server variable md5_password_warnings.
+Support for MD5 passwords will be removed in a future major version release. CREATE ROLE and ALTER ROLE now emit deprecation warnings when setting MD5 passwords.
+These warnings can be disabled by setting the md5_password_warnings parameter to "off".
</para>
</listitem>
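
A sketch of the new warning and how to silence it (the role name and MD5 hash are placeholders):

    CREATE ROLE app_user PASSWORD 'md5e8a48653851e28c69d0506508fb27fc5';
    -- emits a deprecation warning about MD5 password support
    SET md5_password_warnings = off;   -- suppresses the warning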
@@ -138,6 +157,22 @@ Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogg
</para>
</listitem>

+<!--
+Author: Tom Lane <tgl@sss.pgh.pa.us>
+2025-01-23 [01463e1cc] Ensure that AFTER triggers run as the instigating user.
+-->
+
+<listitem>
+<para>
+Execute AFTER triggers as the role that was active when trigger events were queued (Laurenz Albe)
+</para>
+
+<para>
+Previously such triggers were run as the role that was active at trigger execution time (e.g., at COMMIT). This is significant for cases where the role is changed between queue time and
+transaction commit.
+</para>
+</listitem>
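
A sketch of the behavioral difference, assuming a DEFERRABLE INITIALLY DEFERRED AFTER constraint trigger already exists on a hypothetical table "audited":

    BEGIN;
    INSERT INTO audited VALUES (1);   -- queues the deferred AFTER trigger event
    SET ROLE reporting;               -- role changes before commit
    COMMIT;  -- in v18 the trigger runs as the role that ran the INSERT,
             -- not as "reporting"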

<!--
Author: Fujii Masao <fujii@postgresql.org>
2024-09-12 [fefa76f70] Remove old RULE privilege completely.

@@ -173,16 +208,23 @@ This is no longer needed since pg_backend_memory_contexts.path was added.
<!--
Author: David Rowley <drowley@postgresql.org>
2024-07-25 [32d3ed816] Add path column to pg_backend_memory_contexts view
+Author: David Rowley <drowley@postgresql.org>
+2025-04-18 [d9e03864b] Make levels 1-based in pg_log_backend_memory_contexts()
+Author: Fujii Masao <fujii@postgresql.org>
+2025-04-21 [706cbed35] doc: Fix memory context level in pg_log_backend_memory_c
-->

<listitem>
<para>
-Change pg_backend_memory_contexts.level to be one-based (Melih Mutlu)
+Change pg_backend_memory_contexts.level and pg_log_backend_memory_contexts() to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, Fujii Masao)
<ulink url="&commit_baseurl;32d3ed816">§</ulink>
+<ulink url="&commit_baseurl;d9e03864b">§</ulink>
+<ulink url="&commit_baseurl;706cbed35">§</ulink>
</para>

<para>
-It was previously zero-based.
+These were previously zero-based.
</para>
</listitem>
@@ -304,16 +346,25 @@ from the grouping. This was already true for non-deferred primary keys.
<!--
Author: Richard Guo <rguo@postgresql.org>
2024-10-09 [67a54b9e8] Allow pushdown of HAVING clauses with grouping sets
+Author: Richard Guo <rguo@postgresql.org>
+2024-09-10 [247dea89f] Introduce an RTE for the grouping step
+Author: Richard Guo <rguo@postgresql.org>
+2024-09-10 [f5050f795] Mark expressions nullable by grouping sets
+Author: Richard Guo <rguo@postgresql.org>
+2025-03-13 [cc5d98525] Fix incorrect handling of subquery pullup
-->

<listitem>
<para>
Allow some HAVING clauses on GROUPING SETS to be pushed to WHERE clauses (Richard Guo)
<ulink url="&commit_baseurl;67a54b9e8">§</ulink>
+<ulink url="&commit_baseurl;247dea89f">§</ulink>
+<ulink url="&commit_baseurl;f5050f795">§</ulink>
+<ulink url="&commit_baseurl;cc5d98525">§</ulink>
</para>

<para>
-This allows earlier row filtering.
+This allows earlier row filtering. This release also fixes some GROUPING SETS queries that used to return incorrect results.
</para>
</listitem>
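
For example, a HAVING clause that references only columns present in every grouping set can now be evaluated before aggregation (table and columns hypothetical):

    SELECT a, b, count(*)
    FROM t
    GROUP BY GROUPING SETS ((a, b), (a))
    HAVING a = 1;   -- may now be pushed down and applied as WHERE a = 1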
@@ -360,34 +411,6 @@ Allow merge joins to use incremental sorts (Richard Guo)
</para>
</listitem>

-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-08-20 [adf97c156] Speed up Hash Join by making ExprStates support hashing
-Author: David Rowley <drowley@postgresql.org>
-2024-12-11 [0f5738202] Use ExprStates for hashing in GROUP BY and SubPlans
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-24 [4d143509c] Create accessor functions for TupleHashEntry.
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-24 [a0942f441] Add ExecCopySlotMinimalTupleExtra().
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-24 [626df47ad] Remove 'additional' pointer from TupleHashEntryData.
--->
-
-<listitem>
-<para>
-Improve the performance and reduce memory usage of hash joins and GROUP BY (David Rowley, Jeff Davis)
-<ulink url="&commit_baseurl;adf97c156">§</ulink>
-<ulink url="&commit_baseurl;0f5738202">§</ulink>
-<ulink url="&commit_baseurl;4d143509c">§</ulink>
-<ulink url="&commit_baseurl;a0942f441">§</ulink>
-<ulink url="&commit_baseurl;626df47ad">§</ulink>
-</para>
-
-<para>
-This also improves hash set operations used by EXCEPT, and hash lookups of subplan values.
-</para>
-</listitem>

<!--
Author: Amit Langote <amitlan@postgresql.org>
2025-04-04 [88f55bc97] Make derived clause lookup in EquivalenceClass more effi

@@ -397,33 +420,12 @@ Author: David Rowley <drowley@postgresql.org>

<listitem>
<para>
-Allow partitions to be pruned more efficienty (Ashutosh Bapat, Yuya Watari, David Rowley)
+Improve the efficiency of planning queries accessing many partitions (Ashutosh Bapat, Yuya Watari, David Rowley)
<ulink url="&commit_baseurl;88f55bc97">§</ulink>
<ulink url="&commit_baseurl;d69d45a5a">§</ulink>
</para>
</listitem>

-<!--
-Author: Amit Langote <amitlan@postgresql.org>
-2025-01-30 [bb3ec16e1] Move PartitionPruneInfo out of plan nodes into PlannedSt
-Author: Amit Langote <amitlan@postgresql.org>
-2025-01-31 [d47cbf474] Perform runtime initial pruning outside ExecInitNode()
-Author: Amit Langote <amitlan@postgresql.org>
-2025-02-07 [cbc127917] Track unpruned relids to avoid processing pruned relatio
-Author: Amit Langote <amitlan@postgresql.org>
-2025-02-20 [525392d57] Don't lock partitions pruned by initial pruning
--->
-
-<listitem>
-<para>
-Avoid the locking of pruned partitions during planning (Amit Langote)
-<ulink url="&commit_baseurl;bb3ec16e1">§</ulink>
-<ulink url="&commit_baseurl;d47cbf474">§</ulink>
-<ulink url="&commit_baseurl;cbc127917">§</ulink>
-<ulink url="&commit_baseurl;525392d57">§</ulink>
-</para>
-</listitem>

<!--
Author: Richard Guo <rguo@postgresql.org>
2024-07-30 [9b282a935] Fix partitionwise join with partially-redundant join cla
@@ -492,8 +494,6 @@ Author: Peter Geoghegan <pg@bowt.ie>
2025-04-04 [92fe23d93] Add nbtree skip scan optimization.
Author: Peter Geoghegan <pg@bowt.ie>
2025-04-04 [8a510275d] Further optimize nbtree search scan key comparisons.
-Author: Peter Geoghegan <pg@bowt.ie>
-2025-04-04 [8a510275d] Further optimize nbtree search scan key comparisons.
-->

<listitem>
@@ -501,11 +501,11 @@ Author: Peter Geoghegan <pg@bowt.ie>
Allow skip scans of btree indexes (Peter Geoghegan)
<ulink url="&commit_baseurl;92fe23d93">§</ulink>
<ulink url="&commit_baseurl;8a510275d">§</ulink>
-<ulink url="&commit_baseurl;8a510275d">§</ulink>
</para>

<para>
-This is effective if the earlier non-referenced columns contain few unique values.
+This allows multi-column btree indexes to be used by queries that only
+equality-reference the second or later indexed columns.
</para>
</listitem>
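
A minimal sketch of a query shape that can benefit (names hypothetical):

    CREATE INDEX t_a_b_idx ON t (a, b);
    -- no qual on the leading column "a"; a skip scan iterates over the
    -- distinct values of "a", which pays off when there are few of them
    SELECT * FROM t WHERE b = 42;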
@@ -603,6 +603,7 @@ Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavu
</para>

<para>
This feature allows backends to queue multiple read requests, which allows for more efficient sequential scans, bitmap heap scans, vacuums, etc.
This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it. This also enables
effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used
for asynchronous I/O.
@@ -621,6 +622,34 @@ Improve the locking performance of queries that access many relations (Tomas Von
</para>
</listitem>

+<!--
+Author: David Rowley <drowley@postgresql.org>
+2024-08-20 [adf97c156] Speed up Hash Join by making ExprStates support hashing
+Author: David Rowley <drowley@postgresql.org>
+2024-12-11 [0f5738202] Use ExprStates for hashing in GROUP BY and SubPlans
+Author: Jeff Davis <jdavis@postgresql.org>
+2025-03-24 [4d143509c] Create accessor functions for TupleHashEntry.
+Author: Jeff Davis <jdavis@postgresql.org>
+2025-03-24 [a0942f441] Add ExecCopySlotMinimalTupleExtra().
+Author: Jeff Davis <jdavis@postgresql.org>
+2025-03-24 [626df47ad] Remove 'additional' pointer from TupleHashEntryData.
+-->
+
+<listitem>
+<para>
+Improve the performance and reduce memory usage of hash joins and GROUP BY (David Rowley, Jeff Davis)
+<ulink url="&commit_baseurl;adf97c156">§</ulink>
+<ulink url="&commit_baseurl;0f5738202">§</ulink>
+<ulink url="&commit_baseurl;4d143509c">§</ulink>
+<ulink url="&commit_baseurl;a0942f441">§</ulink>
+<ulink url="&commit_baseurl;626df47ad">§</ulink>
+</para>
+
+<para>
+This also improves hash set operations used by EXCEPT, and hash lookups of subplan values.
+</para>
+</listitem>

<!--
Author: Melanie Plageman <melanieplageman@gmail.com>
2025-02-11 [052026c9b] Eagerly scan all-visible pages to amortize aggressive va
@@ -688,15 +717,12 @@ This more accurately reflects modern hardware.
<!--
Author: Melanie Plageman <melanieplageman@gmail.com>
2025-03-12 [9219093ca] Modularize log_connections output
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-12 [18cd15e70] Add connection establishment duration logging
-->

<listitem>
<para>
Increase the logging granularity of server variable log_connections (Melanie Plageman)
<ulink url="&commit_baseurl;9219093ca">§</ulink>
-<ulink url="&commit_baseurl;18cd15e70">§</ulink>
</para>

<para>
@@ -704,6 +730,18 @@ This server variable was previously only boolean; these options are still suppo
</para>
</listitem>

+<!--
+Author: Melanie Plageman <melanieplageman@gmail.com>
+2025-03-12 [18cd15e70] Add connection establishment duration logging
+-->
+
+<listitem>
+<para>
+Add log_connections option to report the duration of connection stages (Melanie Plageman)
+<ulink url="&commit_baseurl;18cd15e70">§</ulink>
+</para>
+</listitem>
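
A sketch of the finer-grained form, using the stage names introduced by commit 9219093ca (verify the exact list against the final documentation):

    -- previously just on/off; a list of stages may now be given
    SET log_connections = 'receipt,authentication,authorization,setup_durations';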

<!--
Author: Tom Lane <tgl@sss.pgh.pa.us>
2025-04-07 [3516ea768] Add local-address escape "%L" to log_line_prefix.

@@ -723,7 +761,7 @@ Author: Fujii Masao <fujii@postgresql.org>

<listitem>
<para>
-Add server variable log_lock_failure to log lock acquisition failures (Yuki Seino)
+Add server variable log_lock_failures to log lock acquisition failures (Yuki Seino)
<ulink url="&commit_baseurl;6d376c3b0">§</ulink>
</para>
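
A sketch of its use together with a conditional lock attempt (table name hypothetical):

    SET log_lock_failures = on;
    SELECT * FROM queue_jobs WHERE id = 7 FOR UPDATE NOWAIT;
    -- if the row lock cannot be obtained, the failure is now logged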
@@ -768,6 +806,33 @@ mode; tracking must be enabled with the server variable track_cost_delay_timing.
</para>
</listitem>

+<!--
+Author: Masahiko Sawada <msawada@postgresql.org>
+2024-08-13 [4c1b4cdb8] Add resource statistics reporting to ANALYZE VERBOSE.
+Author: Masahiko Sawada <msawada@postgresql.org>
+2024-09-09 [bb7775234] Add WAL usage reporting to ANALYZE VERBOSE output.
+-->
+
+<listitem>
+<para>
+Add WAL, CPU, and average read statistics output to ANALYZE VERBOSE (Anthonin Bonnefoy)
+<ulink url="&commit_baseurl;4c1b4cdb8">§</ulink>
+<ulink url="&commit_baseurl;bb7775234">§</ulink>
+</para>
+</listitem>
+
+<!--
+Author: Michael Paquier <michael@paquier.xyz>
+2025-02-17 [6a8a7ce47] Add information about WAL buffers full to VACUUM/ANALYZE
+-->
+
+<listitem>
+<para>
+Add full WAL buffer count to VACUUM/ANALYZE (VERBOSE) and autovacuum log output (Bertrand Drouvot)
+<ulink url="&commit_baseurl;6a8a7ce47">§</ulink>
+</para>
+</listitem>
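
For instance (table name hypothetical):

    ANALYZE VERBOSE orders;
    -- the INFO output now includes WAL usage, CPU time, average read rate,
    -- and a count of WAL buffers that filled up during the operation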

<!--
Author: Michael Paquier <michael@paquier.xyz>
2024-12-19 [9aea73fc6] Add backend-level statistics to pgstats

@@ -968,21 +1033,6 @@ This is true even if the tables in different schemas have different column names
</para>
</listitem>

-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-04-08 [042a66291] Add function to get memory context stats for processes
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-04-08 [c57971034] Rename argument in pg_get_process_memory_contexts().
--->
-
-<listitem>
-<para>
-Add function pg_get_process_memory_contexts() to report process memory context statistics (Rahila Syed)
-<ulink url="&commit_baseurl;042a66291">§</ulink>
-<ulink url="&commit_baseurl;c57971034">§</ulink>
-</para>
-</listitem>
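
A hedged sketch of a call to this function; the argument list assumed here (target PID, summary flag, timeout in seconds) follows commit 042a66291, and the PID is hypothetical:

    SELECT name, type, level, total_bytes
    FROM pg_get_process_memory_contexts(12345, false, 5);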

<!--
Author: David Rowley <drowley@postgresql.org>
2024-07-01 [12227a1d5] Add context type field to pg_backend_memory_contexts
@@ -1234,12 +1284,15 @@ This is useful for operating system configuration.
<!--
Author: Peter Eisentraut <peter@eisentraut.org>
2025-03-19 [4f7f7b037] extension_control_path
+Author: Peter Eisentraut <peter@eisentraut.org>
+2025-05-02 [81eaaa2c4] Make "directory" setting work with extension_control_pat
-->

<listitem>
<para>
Add server variable extension_control_path to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara)
<ulink url="&commit_baseurl;4f7f7b037">§</ulink>
+<ulink url="&commit_baseurl;81eaaa2c4">§</ulink>
</para>
</listitem>
@@ -1323,7 +1376,7 @@ Author: Amit Kapila <akapila@postgresql.org>

<listitem>
<para>
-Change the default CREATE SUBSCRIPTION streaming option from "off" to "parallel" (Hayato Kuroda, Masahiko Sawada, Peter Smith, Amit Kapila)
+Change the default CREATE SUBSCRIPTION streaming option from "off" to "parallel" (Vignesh C)
<ulink url="&commit_baseurl;1bf1140be">§</ulink>
</para>
</listitem>
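
A plain CREATE SUBSCRIPTION therefore now behaves as if streaming = parallel had been given; the old default must be requested explicitly (connection string and names hypothetical):

    CREATE SUBSCRIPTION sub1
        CONNECTION 'host=pub.example.com dbname=src'
        PUBLICATION pub1
        WITH (streaming = off);   -- restore the pre-v18 default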
@@ -1416,9 +1469,9 @@ Add OLD/NEW support to RETURNING in DML queries (Dean Rasheed)
</para>

<para>
-Previously RETURNING only returned new values for INSERT and UPDATE, old values for DELETE; MERGE would return the appropriate value for the internal query executed. This new syntax
-allows INSERT with an ON CONFLICT action to return old values, UPDATE to return old values, and DELETE to return new values if the query assigned to an ON DELETE row would return new
-values. New syntax allows changeable relation aliases "old" and "new" to specify which values should be returned.
+Previously RETURNING only returned new values for INSERT and UPDATE, and old values for DELETE; MERGE would return the appropriate value for the internal query executed. This new syntax
+allows the RETURNING list of INSERT/UPDATE/DELETE/MERGE to explicitly return old and new values by using the special aliases "old" and "new". These aliases can be renamed to
+avoid identifier conflicts.
</para>
</listitem>
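
A minimal sketch of the new aliases (table and columns hypothetical):

    UPDATE accounts
       SET balance = balance - 100
     WHERE id = 1
    RETURNING old.balance AS before, new.balance AS after;

    -- the aliases can be renamed if "old"/"new" collide with existing names
    UPDATE accounts
       SET balance = 0
    RETURNING WITH (OLD AS prev, NEW AS cur) prev.balance, cur.balance;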
@@ -1758,33 +1811,15 @@ Automatically include BUFFERS output in EXPLAIN ANALYZE (Guillaume Lelarge, Davi
</para>
</listitem>

-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-08-13 [4c1b4cdb8] Add resource statistics reporting to ANALYZE VERBOSE.
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-09-09 [bb7775234] Add WAL usage reporting to ANALYZE VERBOSE output.
--->
-
-<listitem>
-<para>
-Add WAL, CPU, and average read statistics output to EXPLAIN ANALYZE VERBOSE (Anthonin Bonnefoy)
-<ulink url="&commit_baseurl;4c1b4cdb8">§</ulink>
-<ulink url="&commit_baseurl;bb7775234">§</ulink>
-</para>
-</listitem>
-
<!--
Author: Michael Paquier <michael@paquier.xyz>
2025-02-17 [320545bfc] Add information about WAL buffers being full to EXPLAIN
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-17 [6a8a7ce47] Add information about WAL buffers full to VACUUM/ANALYZE
-->

<listitem>
<para>
-Add full WAL buffer count to EXPLAIN (WAL), VACUUM/ANALYZE (VERBOSE), and autovacuum log output (Bertrand Drouvot)
+Add full WAL buffer count to EXPLAIN (WAL) output (Bertrand Drouvot)
<ulink url="&commit_baseurl;320545bfc">§</ulink>
-<ulink url="&commit_baseurl;6a8a7ce47">§</ulink>
</para>
</listitem>
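
For example (table hypothetical, single integer column assumed):

    EXPLAIN (ANALYZE, WAL)
    INSERT INTO metrics SELECT generate_series(1, 100000);
    -- the WAL line of the output now includes a "buffers full" count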
@@ -1916,7 +1951,7 @@ Author: Tom Lane <tgl@sss.pgh.pa.us>

<listitem>
<para>
-Allow jsonb NULL values to be cast to scalar types as NULL (Tom Lane)
+Allow jsonb "null" values to be cast to scalar types as NULL (Tom Lane)
<ulink url="&commit_baseurl;a5579a90a">§</ulink>
</para>
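
For example:

    SELECT ('null'::jsonb)::int;   -- now yields SQL NULL instead of an error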
@@ -2132,7 +2167,7 @@ Author: Nathan Bossart <nathan@postgresql.org>

<listitem>
<para>
-Add functions crc32() and crc32c to compute CRC values (Aleksander Alekseev)
+Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev)
<ulink url="&commit_baseurl;760162fed">§</ulink>
</para>
</listitem>
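
Both take a bytea and return bigint:

    SELECT crc32('PostgreSQL'::bytea) AS crc32,
           crc32c('PostgreSQL'::bytea) AS crc32c;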
@@ -2596,24 +2631,23 @@ Author: Jeff Davis <jdavis@postgresql.org>

<listitem>
<para>
-Add pg_dump options --with-schema, --with-data, and --with_statistics (Jeff Davis)
+Add pg_dump options --with-schema, --with-data, and --with-statistics (Jeff Davis)
<ulink url="&commit_baseurl;bde2fb797">§</ulink>
</para>

<para>
The negative versions of these options already existed.
</para>
</listitem>

<!--
Author: Nathan Bossart <nathan@postgresql.org>
2025-03-25 [9c49f0e8c] pg_dump: Add - -sequence-data.
+Author: Nathan Bossart <nathan@postgresql.org>
+2025-05-07 [acea3fc49] pg_dumpall: Add - -sequence-data.
-->

<listitem>
<para>
-Add pg_dump option --sequence-data to dump sequence data that would normally be excluded (Nathan Bossart)
+Add pg_dump and pg_dumpall option --sequence-data to dump sequence data that would normally be excluded (Nathan Bossart)
<ulink url="&commit_baseurl;9c49f0e8c">§</ulink>
+<ulink url="&commit_baseurl;acea3fc49">§</ulink>
</para>
</listitem>
@@ -2636,7 +2670,7 @@ Author: Tom Lane <tgl@sss.pgh.pa.us>

<listitem>
<para>
-Add option --no-policies to pg_dump, pg_dumpall, pg_restore to avoid policy specification (Nikolay Samokhvalov)
+Add option --no-policies to disable row level security policy processing in pg_dump, pg_dumpall, pg_restore (Nikolay Samokhvalov)
<ulink url="&commit_baseurl;cd3c45125">§</ulink>
</para>
@@ -2765,7 +2799,7 @@ This is to handle cases where a pre-Postgres 18 cluster's default CPU signedness
</sect4>

<sect4 id="release-18-logicalrep-app">
-<title>Logical Replication Applications></title>
+<title>Logical Replication Applications</title>

<itemizedlist>
@@ -2859,6 +2893,18 @@ Injection points can now be created, but not run, via INJECTION_POINT_LOAD(), an
</para>
</listitem>

+<!--
+Author: Michael Paquier <michael@paquier.xyz>
+2025-05-10 [371f2db8b] Add support for runtime arguments in injection points
+-->
+
+<listitem>
+<para>
+Support runtime arguments in injection points (Michael Paquier)
+<ulink url="&commit_baseurl;371f2db8b">§</ulink>
+</para>
+</listitem>

<!--
Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
2024-07-26 [20e0e7da9] Add test for early backend startup errors
@@ -2912,13 +2958,22 @@ Add ARM Neon and SVE CPU intrinsics for popcount (integer bit counting) (Chiranm

<!--
Author: Dean Rasheed <dean.a.rasheed@gmail.com>
2024-07-09 [ca481d3c9] Optimise numeric multiplication for short inputs.
+Author: Dean Rasheed <dean.a.rasheed@gmail.com>
+2024-08-15 [c4e44224c] Extend mul_var_short() to 5 and 6-digit inputs.
Author: Dean Rasheed <dean.a.rasheed@gmail.com>
2024-08-15 [8dc28d7eb] Optimise numeric multiplication using base-NBASE^2 arith
+Author: Dean Rasheed <dean.a.rasheed@gmail.com>
+2024-10-04 [9428c001f] Speed up numeric division by always using the "fast" alg
-->

<listitem>
<para>
-Improve the speed of multiplication (Joel Jacobson, Dean Rasheed)
+Improve the speed of numeric multiplication and division (Joel Jacobson, Dean Rasheed)
<ulink url="&commit_baseurl;ca481d3c9">§</ulink>
<ulink url="&commit_baseurl;c4e44224c">§</ulink>
<ulink url="&commit_baseurl;8dc28d7eb">§</ulink>
<ulink url="&commit_baseurl;9428c001f">§</ulink>
</para>
</listitem>
@@ -3308,7 +3363,7 @@ Author: Tatsuo Ishii <ishii@postgresql.org>

<listitem>
<para>
-Have pgbench report the number of failed transactions (Yugo Nagata)
+Have pgbench report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata)
<ulink url="&commit_baseurl;cae0f3c40">§</ulink>
</para>
</listitem>
@@ -3932,7 +3932,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<structfield>passwd</structfield> <type>text</type>
</para>
<para>
-Password (possibly encrypted); null if none. See
+Encrypted password; null if none. See
<link linkend="catalog-pg-authid"><structname>pg_authid</structname></link>
for details of how encrypted passwords are stored.
</para></entry>
@@ -129,10 +129,9 @@
In all cases, a trigger is executed as part of the same transaction as
the statement that triggered it, so if either the statement or the
trigger causes an error, the effects of both will be rolled back.
-Also, the trigger will always run in the security context of the role
-that executed the statement that caused the trigger to fire, unless
-the trigger function is defined as <literal>SECURITY DEFINER</literal>,
-in which case it will run as the function owner.
+Also, the trigger will always run as the role that queued the trigger
+event, unless the trigger function is marked as <literal>SECURITY
+DEFINER</literal>, in which case it will run as the function owner.
</para>

<para>
@@ -3829,15 +3829,17 @@ uint32 WaitEventExtensionNew(const char *wait_event_name)
An injection point with a given <literal>name</literal> is declared using
macro:
<programlisting>
-INJECTION_POINT(name);
+INJECTION_POINT(name, arg);
</programlisting>

There are a few injection points already declared at strategic points
within the server code. After adding a new injection point the code needs
to be compiled in order for that injection point to be available in the
binary. Add-ins written in C-language can declare injection points in
-their own code using the same macro. The injection point names should
-use lower-case characters, with terms separated by dashes.
+their own code using the same macro. The injection point names should use
+lower-case characters, with terms separated by
+dashes. <literal>arg</literal> is an optional argument value given to the
+callback at run-time.
</para>

<para>
@@ -3847,7 +3849,7 @@ INJECTION_POINT(name);
a two-step approach with the following macros:
<programlisting>
INJECTION_POINT_LOAD(name);
-INJECTION_POINT_CACHED(name);
+INJECTION_POINT_CACHED(name, arg);
</programlisting>

Before entering the critical section,
@@ -3880,7 +3882,9 @@ extern void InjectionPointAttach(const char *name,
<literal>InjectionPointCallback</literal>:
<programlisting>
static void
-custom_injection_callback(const char *name, const void *private_data)
+custom_injection_callback(const char *name,
+                          const void *private_data,
+                          void *arg)
{
    uint32 wait_event_info = WaitEventInjectionPointNew(name);

@@ -3909,7 +3913,7 @@ if (IS_INJECTION_POINT_ATTACHED("before-foobar"))
    local_var = 123;

    /* also execute the callback */
-    INJECTION_POINT_CACHED("before-foobar");
+    INJECTION_POINT_CACHED("before-foobar", NULL);
}
#endif
</programlisting>
@@ -598,7 +598,7 @@
<entry>11</entry>
</row>
<row>
-<entry><function>stratnum</function></entry>
+<entry><function>translate_cmptype</function></entry>
<entry>translate compare types to strategy numbers
used by the operator class (optional)</entry>
<entry>12</entry>
meson.build
@@ -2654,6 +2654,7 @@ decl_checks += [
  ['preadv', 'sys/uio.h'],
  ['pwritev', 'sys/uio.h'],
  ['strchrnul', 'string.h'],
+ ['memset_s', 'string.h', '#define __STDC_WANT_LIB_EXT1__ 1'],
]

# Check presence of some optional LLVM functions.
@@ -2667,21 +2668,23 @@ endif
foreach c : decl_checks
  func = c.get(0)
  header = c.get(1)
- args = c.get(2, {})
+ prologue = c.get(2, '')
+ args = c.get(3, {})
  varname = 'HAVE_DECL_' + func.underscorify().to_upper()

  found = cc.compiles('''
-#include <@0@>
+@0@
+#include <@1@>

int main()
{
-#ifndef @1@
-    (void) @1@;
+#ifndef @2@
+    (void) @2@;
#endif

return 0;
}
-'''.format(header, func),
+'''.format(prologue, header, func),
    name: 'test whether @0@ is declared'.format(func),
    # need to add cflags_warn to get at least
    # -Werror=unguarded-availability-new if applicable
@@ -2880,7 +2883,6 @@ func_checks = [
  ['kqueue'],
  ['localeconv_l'],
  ['mbstowcs_l'],
- ['memset_s'],
  ['mkdtemp'],
  ['posix_fadvise'],
  ['posix_fallocate'],
@@ -112,7 +112,7 @@ ifeq ($(PORTNAME), darwin)
ifneq ($(SO_MAJOR_VERSION), 0)
version_link = -compatibility_version $(SO_MAJOR_VERSION) -current_version $(SO_MAJOR_VERSION).$(SO_MINOR_VERSION)
endif
-LINK.shared = $(COMPILER) -dynamiclib -install_name '$(libdir)/lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)' $(version_link) $(exported_symbols_list)
+LINK.shared = $(COMPILER) -dynamiclib -install_name '$(libdir)/lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)' $(version_link)
shlib = lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)
shlib_major = lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)
else
@@ -122,7 +122,7 @@ ifeq ($(PORTNAME), darwin)
BUILD.exports = $(AWK) '/^[^\#]/ {printf "_%s\n",$$1}' $< >$@
exports_file = $(SHLIB_EXPORTS:%.txt=%.list)
ifneq (,$(exports_file))
-exported_symbols_list = -exported_symbols_list $(exports_file)
+LINK.shared += -exported_symbols_list $(exports_file)
endif
endif
@@ -68,7 +68,7 @@ typedef struct BrinShared
    int         scantuplesortstates;

    /* Query ID, for report in worker processes */
-   uint64      queryid;
+   int64       queryid;

    /*
     * workersdonecv is used to monitor the progress of workers. All parallel
@@ -1243,8 +1243,9 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
        }
        else
        {
-           text       *t;
+           const char *name;
            const char *value;
+           text       *t;
            Size        len;

            /*
@@ -1291,11 +1292,19 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
             * have just "name", assume "name=true" is meant. Note: the
             * namespace is not output.
             */
+           name = def->defname;
            if (def->arg != NULL)
                value = defGetString(def);
            else
                value = "true";

+           /* Insist that name not contain "=", else "a=b=c" is ambiguous */
+           if (strchr(name, '=') != NULL)
+               ereport(ERROR,
+                       (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                        errmsg("invalid option name \"%s\": must not contain \"=\"",
+                               name)));
+
            /*
             * This is not a great place for this test, but there's no other
             * convenient place to filter the option out. As WITH (oids =
@@ -1303,7 +1312,7 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
             * amount of ugly.
             */
            if (acceptOidsOff && def->defnamespace == NULL &&
-               strcmp(def->defname, "oids") == 0)
+               strcmp(name, "oids") == 0)
            {
                if (defGetBoolean(def))
                    ereport(ERROR,
@@ -1313,11 +1322,11 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
                continue;
            }

-           len = VARHDRSZ + strlen(def->defname) + 1 + strlen(value);
+           len = VARHDRSZ + strlen(name) + 1 + strlen(value);
            /* +1 leaves room for sprintf's trailing null */
            t = (text *) palloc(len + 1);
            SET_VARSIZE(t, len);
-           sprintf(VARDATA(t), "%s=%s", def->defname, value);
+           sprintf(VARDATA(t), "%s=%s", name, value);

            astate = accumArrayResult(astate, PointerGetDatum(t),
                                      false, TEXTOID,
@@ -685,9 +685,9 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,

#ifdef USE_INJECTION_POINTS
    if (GinPageIsLeaf(BufferGetPage(stack->buffer)))
-       INJECTION_POINT("gin-leave-leaf-split-incomplete");
+       INJECTION_POINT("gin-leave-leaf-split-incomplete", NULL);
    else
-       INJECTION_POINT("gin-leave-internal-split-incomplete");
+       INJECTION_POINT("gin-leave-internal-split-incomplete", NULL);
#endif

    /* search parent to lock */
@@ -778,7 +778,7 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
static void
ginFinishOldSplit(GinBtree btree, GinBtreeStack *stack, GinStatsData *buildStats, int access)
{
-   INJECTION_POINT("gin-finish-incomplete-split");
+   INJECTION_POINT("gin-finish-incomplete-split", NULL);
    elog(DEBUG1, "finishing incomplete split of block %u in gin index \"%s\"",
         stack->blkno, RelationGetRelationName(btree->index));
@@ -1058,11 +1058,11 @@ gistGetFakeLSN(Relation rel)
}

/*
- * This is a stratnum support function for GiST opclasses that use the
- * RT*StrategyNumber constants.
+ * This is a stratnum translation support function for GiST opclasses that use
+ * the RT*StrategyNumber constants.
 */
Datum
-gist_stratnum_common(PG_FUNCTION_ARGS)
+gist_translate_cmptype_common(PG_FUNCTION_ARGS)
{
    CompareType cmptype = PG_GETARG_INT32(0);

@@ -1090,9 +1090,9 @@ gist_stratnum_common(PG_FUNCTION_ARGS)
/*
 * Returns the opclass's private stratnum used for the given compare type.
 *
- * Calls the opclass's GIST_STRATNUM_PROC support function, if any,
- * and returns the result.
- * Returns InvalidStrategy if the function is not defined.
+ * Calls the opclass's GIST_TRANSLATE_CMPTYPE_PROC support function, if any,
+ * and returns the result. Returns InvalidStrategy if the function is not
+ * defined.
 */
StrategyNumber
gisttranslatecmptype(CompareType cmptype, Oid opfamily)
@@ -1101,7 +1101,7 @@ gisttranslatecmptype(CompareType cmptype, Oid opfamily)
    Datum       result;

    /* Check whether the function is provided. */
-   funcid = get_opfamily_proc(opfamily, ANYOID, ANYOID, GIST_STRATNUM_PROC);
+   funcid = get_opfamily_proc(opfamily, ANYOID, ANYOID, GIST_TRANSLATE_CMPTYPE_PROC);
    if (!OidIsValid(funcid))
        return InvalidStrategy;
@@ -138,7 +138,7 @@ gistvalidate(Oid opclassoid)
            ok = check_amproc_signature(procform->amproc, VOIDOID, true,
                                        1, 1, INTERNALOID);
            break;
-       case GIST_STRATNUM_PROC:
+       case GIST_TRANSLATE_CMPTYPE_PROC:
            ok = check_amproc_signature(procform->amproc, INT2OID, true,
                                        1, 1, INT4OID) &&
                procform->amproclefttype == ANYOID &&
@@ -265,7 +265,7 @@ gistvalidate(Oid opclassoid)
        if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC ||
            i == GIST_COMPRESS_PROC || i == GIST_DECOMPRESS_PROC ||
            i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC ||
-           i == GIST_STRATNUM_PROC)
+           i == GIST_TRANSLATE_CMPTYPE_PROC)
            continue;           /* optional methods */
        ereport(INFO,
                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -336,7 +336,7 @@ gistadjustmembers(Oid opfamilyoid,
        case GIST_FETCH_PROC:
        case GIST_OPTIONS_PROC:
        case GIST_SORTSUPPORT_PROC:
-       case GIST_STRATNUM_PROC:
+       case GIST_TRANSLATE_CMPTYPE_PROC:
            /* Optional, so force it to be a soft family dependency */
            op->ref_is_hard = false;
            op->ref_is_family = true;
@@ -213,6 +213,27 @@ static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
#define TUPLOCK_from_mxstatus(status) \
    (MultiXactStatusLock[(status)])

+/*
+ * Check that we have a valid snapshot if we might need TOAST access.
+ */
+static inline void
+AssertHasSnapshotForToast(Relation rel)
+{
+#ifdef USE_ASSERT_CHECKING
+
+   /* bootstrap mode in particular breaks this rule */
+   if (!IsNormalProcessingMode())
+       return;
+
+   /* if the relation doesn't have a TOAST table, we are good */
+   if (!OidIsValid(rel->rd_rel->reltoastrelid))
+       return;
+
+   Assert(HaveRegisteredOrActiveSnapshot());
+
+#endif                          /* USE_ASSERT_CHECKING */
+}
+
/* ----------------------------------------------------------------
 *                       heap support routines
 * ----------------------------------------------------------------
@@ -2066,6 +2087,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
    Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
           RelationGetNumberOfAttributes(relation));

+   AssertHasSnapshotForToast(relation);
+
    /*
     * Fill in tuple header fields and toast the tuple if necessary.
     *
@@ -2343,6 +2366,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
    /* currently not needed (thus unsupported) for heap_multi_insert() */
    Assert(!(options & HEAP_INSERT_NO_LOGICAL));

+   AssertHasSnapshotForToast(relation);
+
    needwal = RelationNeedsWAL(relation);
    saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
                                                   HEAP_DEFAULT_FILLFACTOR);
@@ -2765,6 +2790,8 @@ heap_delete(Relation relation, ItemPointer tid,

    Assert(ItemPointerIsValid(tid));

+   AssertHasSnapshotForToast(relation);
+
    /*
     * Forbid this during a parallel operation, lest it allocate a combo CID.
     * Other workers might need that combo CID for visibility checks, and we
@@ -3260,6 +3287,8 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
    Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
           RelationGetNumberOfAttributes(relation));

+   AssertHasSnapshotForToast(relation);
+
    /*
     * Forbid this during a parallel operation, lest it allocate a combo CID.
     * Other workers might need that combo CID for visibility checks, and we
@@ -3304,7 +3333,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
    interesting_attrs = bms_add_members(interesting_attrs, id_attrs);

    block = ItemPointerGetBlockNumber(otid);
-   INJECTION_POINT("heap_update-before-pin");
+   INJECTION_POINT("heap_update-before-pin", NULL);
    buffer = ReadBuffer(relation, block);
    page = BufferGetPage(buffer);

@@ -4953,7 +4982,7 @@ l3:
            case LockWaitError:
                if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
                                                status, infomask, relation,
-                                               NULL, log_lock_failure))
+                                               NULL, log_lock_failures))
                    ereport(ERROR,
                            (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                             errmsg("could not obtain lock on row in relation \"%s\"",
@@ -4991,7 +5020,7 @@ l3:
                }
                break;
            case LockWaitError:
-               if (!ConditionalXactLockTableWait(xwait, log_lock_failure))
+               if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
                    ereport(ERROR,
                            (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                             errmsg("could not obtain lock on row in relation \"%s\"",
@@ -5256,7 +5285,7 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
            break;

        case LockWaitError:
-           if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failure))
+           if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
                ereport(ERROR,
                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                         errmsg("could not obtain lock on row in relation \"%s\"",
@@ -464,7 +464,7 @@ tuple_lock_retry:
                    return TM_WouldBlock;
                break;
            case LockWaitError:
-               if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, log_lock_failure))
+               if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, log_lock_failures))
                    ereport(ERROR,
                            (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                             errmsg("could not obtain lock on row in relation \"%s\"",
@@ -757,7 +757,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
    vacrel->vm_new_visible_pages = 0;
    vacrel->vm_new_visible_frozen_pages = 0;
    vacrel->vm_new_frozen_pages = 0;
-   vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);

    /*
     * Get cutoffs that determine which deleted tuples are considered DEAD,
@@ -776,7 +775,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
     * to increase the number of dead tuples it can prune away.)
     */
    vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
+   vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
    vacrel->vistest = GlobalVisTestFor(rel);

    /* Initialize state used to track oldest extant XID/MXID */
    vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
    vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
@@ -1413,11 +1414,25 @@ lazy_scan_heap(LVRelState *vacrel)

            if (vm_page_frozen)
            {
-               Assert(vacrel->eager_scan_remaining_successes > 0);
-               vacrel->eager_scan_remaining_successes--;
+               if (vacrel->eager_scan_remaining_successes > 0)
+                   vacrel->eager_scan_remaining_successes--;

                if (vacrel->eager_scan_remaining_successes == 0)
                {
+                   /*
+                    * Report only once that we disabled eager scanning. We
+                    * may eagerly read ahead blocks in excess of the success
+                    * or failure caps before attempting to freeze them, so we
+                    * could reach here even after disabling additional eager
+                    * scanning.
+                    */
+                   if (vacrel->eager_scan_max_fails_per_region > 0)
+                       ereport(vacrel->verbose ? INFO : DEBUG2,
+                               (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
+                                       orig_eager_scan_success_limit,
+                                       vacrel->dbname, vacrel->relnamespace,
+                                       vacrel->relname)));
+
                    /*
                     * If we hit our success cap, permanently disable eager
                     * scanning by setting the other eager scan management
@@ -1426,19 +1441,10 @@ lazy_scan_heap(LVRelState *vacrel)
                    vacrel->eager_scan_remaining_fails = 0;
                    vacrel->next_eager_scan_region_start = InvalidBlockNumber;
                    vacrel->eager_scan_max_fails_per_region = 0;
-
-                   ereport(vacrel->verbose ? INFO : DEBUG2,
-                           (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
-                                   orig_eager_scan_success_limit,
-                                   vacrel->dbname, vacrel->relnamespace,
-                                   vacrel->relname)));
                }
            }
-           else
-           {
-               Assert(vacrel->eager_scan_remaining_fails > 0);
+           else if (vacrel->eager_scan_remaining_fails > 0)
                vacrel->eager_scan_remaining_fails--;
-           }
        }

        /*
@@ -851,7 +851,7 @@ systable_inplace_update_begin(Relation relation,
        if (retries++ > 10000)
            elog(ERROR, "giving up after too many tries to overwrite row");

-       INJECTION_POINT("inplace-before-pin");
+       INJECTION_POINT("inplace-before-pin", NULL);
        scan = systable_beginscan(relation, indexId, indexOK, snapshot,
                                  nkeys, unconstify(ScanKeyData *, key));
        oldtup = systable_getnext(scan);
@@ -228,6 +228,8 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
    BTScanOpaque so = (BTScanOpaque) scan->opaque;
    bool        res;

+   Assert(scan->heapRelation != NULL);
+
    /* btree indexes are never lossy */
    scan->xs_recheck = false;

@@ -289,6 +291,8 @@ btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
    int64       ntids = 0;
    ItemPointer heapTid;

+   Assert(scan->heapRelation == NULL);
+
    /* Each loop iteration performs another primitive index scan */
    do
    {
@@ -393,6 +397,34 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
        BTScanPosInvalidate(so->currPos);
    }

+   /*
+    * We prefer to eagerly drop leaf page pins before btgettuple returns.
+    * This avoids making VACUUM wait to acquire a cleanup lock on the page.
+    *
+    * We cannot safely drop leaf page pins during index-only scans due to a
+    * race condition involving VACUUM setting pages all-visible in the VM.
+    * It's also unsafe for plain index scans that use a non-MVCC snapshot.
+    *
+    * When we drop pins eagerly, the mechanism that marks so->killedItems[]
+    * index tuples LP_DEAD has to deal with concurrent TID recycling races.
+    * The scheme used to detect unsafe TID recycling won't work when scanning
+    * unlogged relations (since it involves saving an affected page's LSN).
+    * Opt out of eager pin dropping during unlogged relation scans for now
+    * (this is preferable to opting out of kill_prior_tuple LP_DEAD setting).
+    *
+    * Also opt out of dropping leaf page pins eagerly during bitmap scans.
+    * Pins cannot be held for more than an instant during bitmap scans either
+    * way, so we might as well avoid wasting cycles on acquiring page LSNs.
+    *
+    * See nbtree/README section on making concurrent TID recycling safe.
+    *
+    * Note: so->dropPin should never change across rescans.
+    */
+   so->dropPin = (!scan->xs_want_itup &&
+                  IsMVCCSnapshot(scan->xs_snapshot) &&
+                  RelationNeedsWAL(scan->indexRelation) &&
+                  scan->heapRelation != NULL);
+
    so->markItemIndex = -1;
    so->needPrimScan = false;
    so->scanBehind = false;
@@ -25,7 +25,7 @@
#include "utils/rel.h"


-static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp);
+static inline void _bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so);
static Buffer _bt_moveright(Relation rel, Relation heaprel, BTScanInsert key,
                            Buffer buf, bool forupdate, BTStack stack,
                            int access);
@@ -57,24 +57,29 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
/*
 * _bt_drop_lock_and_maybe_pin()
 *
- * Unlock the buffer; and if it is safe to release the pin, do that, too.
- * This will prevent vacuum from stalling in a blocked state trying to read a
- * page when a cursor is sitting on it.
- *
- * See nbtree/README section on making concurrent TID recycling safe.
+ * Unlock so->currPos.buf. If scan is so->dropPin, drop the pin, too.
+ * Dropping the pin prevents VACUUM from blocking on acquiring a cleanup lock.
 */
-static void
-_bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
+static inline void
+_bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so)
{
-   _bt_unlockbuf(scan->indexRelation, sp->buf);
-
-   if (IsMVCCSnapshot(scan->xs_snapshot) &&
-       RelationNeedsWAL(scan->indexRelation) &&
-       !scan->xs_want_itup)
+   if (!so->dropPin)
    {
-       ReleaseBuffer(sp->buf);
-       sp->buf = InvalidBuffer;
+       /* Just drop the lock (not the pin) */
+       _bt_unlockbuf(rel, so->currPos.buf);
+       return;
    }
+
+   /*
+    * Drop both the lock and the pin.
+    *
+    * Have to set so->currPos.lsn so that _bt_killitems has a way to detect
+    * when concurrent heap TID recycling by VACUUM might have taken place.
+    */
+   Assert(RelationNeedsWAL(rel));
+   so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
+   _bt_relbuf(rel, so->currPos.buf);
+   so->currPos.buf = InvalidBuffer;
}

/*
@@ -866,8 +871,8 @@ _bt_compare(Relation rel,
 * if backwards scan, the last item) in the tree that satisfies the
 * qualifications in the scan key. On success exit, data about the
 * matching tuple(s) on the page has been loaded into so->currPos. We'll
- * drop all locks and hold onto a pin on page's buffer, except when
- * _bt_drop_lock_and_maybe_pin dropped the pin to avoid blocking VACUUM.
+ * drop all locks and hold onto a pin on page's buffer, except during
+ * so->dropPin scans, when we drop both the lock and the pin.
 * _bt_returnitem sets the next item to return to scan on success exit.
 *
 * If there are no matching items in the index, we return false, with no
@@ -1610,7 +1615,13 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
    so->currPos.currPage = BufferGetBlockNumber(so->currPos.buf);
    so->currPos.prevPage = opaque->btpo_prev;
    so->currPos.nextPage = opaque->btpo_next;
+   /* delay setting so->currPos.lsn until _bt_drop_lock_and_maybe_pin */
+   so->currPos.dir = dir;
+   so->currPos.nextTupleOffset = 0;

+   /* either moreRight or moreLeft should be set now (may be unset later) */
+   Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
+          so->currPos.moreLeft);
    Assert(!P_IGNORE(opaque));
    Assert(BTScanPosIsPinned(so->currPos));
    Assert(!so->needPrimScan);
@@ -1626,14 +1637,6 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
                      so->currPos.currPage);
    }

-   /* initialize remaining currPos fields related to current page */
-   so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
-   so->currPos.dir = dir;
-   so->currPos.nextTupleOffset = 0;
-   /* either moreLeft or moreRight should be set now (may be unset later) */
-   Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
-          so->currPos.moreLeft);
-
    PredicateLockPage(rel, so->currPos.currPage, scan->xs_snapshot);

    /* initialize local variables */
@@ -1790,9 +1793,13 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
            IndexTuple  itup = (IndexTuple) PageGetItem(page, iid);
            int         truncatt;

-           truncatt = BTreeTupleGetNAtts(itup, rel);
+           /* Reset arrays, per _bt_set_startikey contract */
+           if (pstate.forcenonrequired)
+               _bt_start_array_keys(scan, dir);
            pstate.forcenonrequired = false;
            pstate.startikey = 0;   /* _bt_set_startikey ignores P_HIKEY */

+           truncatt = BTreeTupleGetNAtts(itup, rel);
            _bt_checkkeys(scan, &pstate, arrayKeys, itup, truncatt);
        }

@@ -1879,8 +1886,10 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
            pstate.offnum = offnum;
            if (arrayKeys && offnum == minoff && pstate.forcenonrequired)
            {
+               /* Reset arrays, per _bt_set_startikey contract */
                pstate.forcenonrequired = false;
                pstate.startikey = 0;
+               _bt_start_array_keys(scan, dir);
            }
            passes_quals = _bt_checkkeys(scan, &pstate, arrayKeys,
                                         itup, indnatts);
@@ -2101,10 +2110,9 @@ _bt_returnitem(IndexScanDesc scan, BTScanOpaque so)
 *
 * Wrapper on _bt_readnextpage that performs final steps for the current page.
 *
- * On entry, if so->currPos.buf is valid the buffer is pinned but not locked.
- * If there's no pin held, it's because _bt_drop_lock_and_maybe_pin dropped
- * the pin eagerly earlier on. The scan must have so->currPos.currPage set to
- * a valid block, in any case.
+ * On entry, so->currPos must be valid. Its buffer will be pinned, though
+ * never locked. (Actually, when so->dropPin there won't even be a pin held,
+ * though so->currPos.currPage must still be set to a valid block number.)
 */
static bool
_bt_steppage(IndexScanDesc scan, ScanDirection dir)
@@ -2245,12 +2253,14 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
     */
    if (_bt_readpage(scan, dir, offnum, true))
    {
+       Relation    rel = scan->indexRelation;
+
        /*
         * _bt_readpage succeeded. Drop the lock (and maybe the pin) on
         * so->currPos.buf in preparation for btgettuple returning tuples.
         */
        Assert(BTScanPosIsPinned(so->currPos));
-       _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+       _bt_drop_lock_and_maybe_pin(rel, so);
        return true;
    }

@@ -2288,8 +2298,8 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
 *
 * On success exit, so->currPos is updated to contain data from the next
 * interesting page, and we return true. We hold a pin on the buffer on
- * success exit, except when _bt_drop_lock_and_maybe_pin decided it was safe
- * to eagerly drop the pin (to avoid blocking VACUUM).
+ * success exit (except during so->dropPin index scans, when we drop the pin
+ * eagerly to avoid blocking VACUUM).
 *
 * If there are no more matching records in the given direction, we drop all
 * locks and pins, invalidate so->currPos, and return false.
@@ -2407,7 +2417,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno,
     */
    Assert(so->currPos.currPage == blkno);
    Assert(BTScanPosIsPinned(so->currPos));
-   _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+   _bt_drop_lock_and_maybe_pin(rel, so);

    return true;
}
|
@ -105,7 +105,7 @@ typedef struct BTShared
|
||||
int scantuplesortstates;
|
||||
|
||||
/* Query ID, for report in worker processes */
|
||||
uint64 queryid;
|
||||
int64 queryid;
|
||||
|
||||
/*
|
||||
* workersdonecv is used to monitor the progress of workers. All parallel
|
||||
|
@@ -2393,11 +2393,27 @@ _bt_scanbehind_checkkeys(IndexScanDesc scan, ScanDirection dir,
 	TupleDesc	tupdesc = RelationGetDescr(rel);
 	BTScanOpaque so = (BTScanOpaque) scan->opaque;
 	int			nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
+	bool		scanBehind;

 	Assert(so->numArrayKeys);

 	if (_bt_tuple_before_array_skeys(scan, dir, finaltup, tupdesc,
-									 nfinaltupatts, false, 0, NULL))
+									 nfinaltupatts, false, 0, &scanBehind))
 		return false;

+	/*
+	 * If scanBehind was set, all of the untruncated attribute values from
+	 * finaltup that correspond to an array match the array's current element,
+	 * but there are other keys associated with truncated suffix attributes.
+	 * Array advancement must have incremented the scan's arrays on the
+	 * previous page, resulting in a set of array keys that happen to be an
+	 * exact match for the current page high key's untruncated prefix values.
+	 *
+	 * This page definitely doesn't contain tuples that the scan will need to
+	 * return. The next page may or may not contain relevant tuples. Handle
+	 * this by cutting our losses and starting a new primscan.
+	 */
+	if (scanBehind)
+		return false;
+
 	if (!so->oppositeDirCheck)

@@ -2473,13 +2489,14 @@ _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
 * primscan's first page would mislead _bt_advance_array_keys, which expects
 * pstate.nskipadvances to be representative of every first page's key space.)
 *
-* Caller must reset startikey and forcenonrequired ahead of the _bt_checkkeys
-* call for pstate.finaltup iff we set forcenonrequired=true. This will give
-* _bt_checkkeys the opportunity to call _bt_advance_array_keys once more,
-* with sktrig_required=true, to advance the arrays that were ignored during
-* checks of all of the page's prior tuples. Caller doesn't need to do this
-* on the rightmost/leftmost page in the index (where pstate.finaltup isn't
-* set), since forcenonrequired won't be set here by us in the first place.
+* Caller must call _bt_start_array_keys and reset startikey/forcenonrequired
+* ahead of the finaltup _bt_checkkeys call when we set forcenonrequired=true.
+* This will give _bt_checkkeys the opportunity to call _bt_advance_array_keys
+* with sktrig_required=true, restoring the invariant that the scan's required
+* arrays always track the scan's progress through the index's key space.
+* Caller won't need to do this on the rightmost/leftmost page in the index
+* (where pstate.finaltup isn't ever set), since forcenonrequired will never
+* be set here in the first place.
 */
void
_bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)

@@ -2540,10 +2557,31 @@ _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)
 		if (key->sk_flags & SK_ROW_HEADER)
 		{
 			/*
 			 * Can't let pstate.startikey get set to an ikey beyond a
-			 * RowCompare inequality
+			 * RowCompare inequality.
+			 *
+			 * Only the first subkey from a RowCompare can ever be marked
+			 * required (that happens when the row header is marked required).
+			 * There is no simple, general way for us to transitively deduce
+			 * whether or not every tuple on the page satisfies a RowCompare
+			 * key based only on firsttup and lasttup -- so we just give up.
			 */
-			break;				/* unsafe */
+			if (!start_past_saop_eq && !so->skipScan)
+				break;			/* unsafe to go further */
+
+			/*
+			 * We have to be even more careful with RowCompares that come
+			 * after an array: we assume it's unsafe to even bypass the array.
+			 * Calling _bt_start_array_keys to recover the scan's arrays
+			 * following use of forcenonrequired mode isn't compatible with
+			 * _bt_check_rowcompare's continuescan=false behavior with NULL
+			 * row compare members. _bt_advance_array_keys must not make a
+			 * decision on the basis of a key not being satisfied in the
+			 * opposite-to-scan direction until the scan reaches a leaf page
+			 * where the same key begins to be satisfied in scan direction.
+			 * The _bt_first !used_all_subkeys behavior makes this limitation
+			 * hard to work around some other way.
+			 */
+			return;				/* completely unsafe to set pstate.startikey */
 		}
 		if (key->sk_strategy != BTEqualStrategyNumber)
 		{
@@ -3292,87 +3330,85 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
 * current page and killed tuples thereon (generally, this should only be
 * called if so->numKilled > 0).
 *
-* The caller does not have a lock on the page and may or may not have the
-* page pinned in a buffer. Note that read-lock is sufficient for setting
-* LP_DEAD status (which is only a hint).
+* Caller should not have a lock on the so->currPos page, but must hold a
+* buffer pin when !so->dropPin. When we return, it still won't be locked.
+* It'll continue to hold whatever pins were held before calling here.
 *
-* We match items by heap TID before assuming they are the right ones to
-* delete. We cope with cases where items have moved right due to insertions.
-* If an item has moved off the current page due to a split, we'll fail to
-* find it and do nothing (this is not an error case --- we assume the item
-* will eventually get marked in a future indexscan).
+* We match items by heap TID before assuming they are the right ones to set
+* LP_DEAD. If the scan is one that holds a buffer pin on the target page
+* continuously from initially reading the items until applying this function
+* (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
+* page, so the page's TIDs can't have been recycled by now. There's no risk
+* that we'll confuse a new index tuple that happens to use a recycled TID
+* with a now-removed tuple with the same TID (that used to be on this same
+* page). We can't rely on that during scans that drop buffer pins eagerly
+* (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
+* the page LSN having not changed since back when _bt_readpage saw the page.
+* We totally give up on setting LP_DEAD bits when the page LSN changed.
 *
-* Note that if we hold a pin on the target page continuously from initially
-* reading the items until applying this function, VACUUM cannot have deleted
-* any items from the page, and so there is no need to search left from the
-* recorded offset. (This observation also guarantees that the item is still
-* the right one to delete, which might otherwise be questionable since heap
-* TIDs can get recycled.) This holds true even if the page has been modified
-* by inserts and page splits, so there is no need to consult the LSN.
-*
-* If the pin was released after reading the page, then we re-read it. If it
-* has been modified since we read it (as determined by the LSN), we dare not
-* flag any entries because it is possible that the old entry was vacuumed
-* away and the TID was re-used by a completely different heap tuple.
+* We give up much less often during !so->dropPin scans, but it still happens.
+* We cope with cases where items have moved right due to insertions. If an
+* item has moved off the current page due to a split, we'll fail to find it
+* and just give up on it.
 */
void
_bt_killitems(IndexScanDesc scan)
{
+	Relation	rel = scan->indexRelation;
 	BTScanOpaque so = (BTScanOpaque) scan->opaque;
 	Page		page;
 	BTPageOpaque opaque;
 	OffsetNumber minoff;
 	OffsetNumber maxoff;
-	int			i;
 	int			numKilled = so->numKilled;
 	bool		killedsomething = false;
-	bool		droppedpin PG_USED_FOR_ASSERTS_ONLY;
+	Buffer		buf;

+	Assert(numKilled > 0);
 	Assert(BTScanPosIsValid(so->currPos));
 	Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */

-	/*
-	 * Always reset the scan state, so we don't look for same items on other
-	 * pages.
-	 */
+	/* Always invalidate so->killedItems[] before leaving so->currPos */
 	so->numKilled = 0;

-	if (BTScanPosIsPinned(so->currPos))
+	if (!so->dropPin)
 	{
 		/*
 		 * We have held the pin on this page since we read the index tuples,
 		 * so all we need to do is lock it. The pin will have prevented
-		 * re-use of any TID on the page, so there is no need to check the
-		 * LSN.
+		 * concurrent VACUUMs from recycling any of the TIDs on the page.
 		 */
-		droppedpin = false;
-		_bt_lockbuf(scan->indexRelation, so->currPos.buf, BT_READ);
-
-		page = BufferGetPage(so->currPos.buf);
+		Assert(BTScanPosIsPinned(so->currPos));
+		buf = so->currPos.buf;
+		_bt_lockbuf(rel, buf, BT_READ);
 	}
 	else
 	{
-		Buffer		buf;
+		XLogRecPtr	latestlsn;

-		droppedpin = true;
-		/* Attempt to re-read the buffer, getting pin and lock. */
-		buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
+		Assert(!BTScanPosIsPinned(so->currPos));
+		Assert(RelationNeedsWAL(rel));
+		buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);

-		page = BufferGetPage(buf);
-		if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
-			so->currPos.buf = buf;
-		else
+		latestlsn = BufferGetLSNAtomic(buf);
+		Assert(!XLogRecPtrIsInvalid(so->currPos.lsn));
+		Assert(so->currPos.lsn <= latestlsn);
+		if (so->currPos.lsn != latestlsn)
 		{
-			/* Modified while not pinned means hinting is not safe. */
-			_bt_relbuf(scan->indexRelation, buf);
+			/* Modified, give up on hinting */
+			_bt_relbuf(rel, buf);
 			return;
 		}
+
+		/* Unmodified, hinting is safe */
 	}

+	page = BufferGetPage(buf);
 	opaque = BTPageGetOpaque(page);
 	minoff = P_FIRSTDATAKEY(opaque);
 	maxoff = PageGetMaxOffsetNumber(page);

-	for (i = 0; i < numKilled; i++)
+	for (int i = 0; i < numKilled; i++)
 	{
 		int			itemIndex = so->killedItems[i];
 		BTScanPosItem *kitem = &so->currPos.items[itemIndex];

@@ -3404,7 +3440,7 @@ _bt_killitems(IndexScanDesc scan)
 			 * correctness.
 			 *
 			 * Note that the page may have been modified in almost any way
-			 * since we first read it (in the !droppedpin case), so it's
+			 * since we first read it (in the !so->dropPin case), so it's
 			 * possible that this posting list tuple wasn't a posting list
 			 * tuple when we first encountered its heap TIDs.
 			 */

@@ -3420,7 +3456,7 @@ _bt_killitems(IndexScanDesc scan)
 				 * though only in the common case where the page can't
 				 * have been concurrently modified
 				 */
-				Assert(kitem->indexOffset == offnum || !droppedpin);
+				Assert(kitem->indexOffset == offnum || !so->dropPin);

 				/*
 				 * Read-ahead to later kitems here.

@@ -3484,10 +3520,13 @@ _bt_killitems(IndexScanDesc scan)
 	if (killedsomething)
 	{
 		opaque->btpo_flags |= BTP_HAS_GARBAGE;
-		MarkBufferDirtyHint(so->currPos.buf, true);
+		MarkBufferDirtyHint(buf, true);
 	}

-	_bt_unlockbuf(scan->indexRelation, so->currPos.buf);
+	if (!so->dropPin)
+		_bt_unlockbuf(rel, buf);
+	else
+		_bt_relbuf(rel, buf);
 }
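Distilled from the hunks above, a sketch of the LP_DEAD safety rule this
commit establishes (the helper name is hypothetical; the committed code
open-codes this logic inside _bt_killitems):

	static bool
	sketch_lp_dead_hinting_safe(BTScanOpaque so, Buffer buf)
	{
		if (!so->dropPin)
			return true;		/* held pin blocked VACUUM's TID recycling */

		/* Pin was dropped eagerly: page must be unchanged since _bt_readpage */
		return BufferGetLSNAtomic(buf) == so->currPos.lsn;
	}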
@@ -252,6 +252,8 @@ ParsePrepareRecord(uint8 info, xl_xact_prepare *xlrec, xl_xact_parsed_prepare *p
 	parsed->nsubxacts = xlrec->nsubxacts;
 	parsed->nrels = xlrec->ncommitrels;
 	parsed->nabortrels = xlrec->nabortrels;
+	parsed->nstats = xlrec->ncommitstats;
+	parsed->nabortstats = xlrec->nabortstats;
 	parsed->nmsgs = xlrec->ninvalmsgs;

 	strncpy(parsed->twophase_gid, bufptr, xlrec->gidlen);
@@ -872,7 +872,7 @@ MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
 	 */
 	multi = GetNewMultiXactId(nmembers, &offset);

-	INJECTION_POINT_CACHED("multixact-create-from-members");
+	INJECTION_POINT_CACHED("multixact-create-from-members", NULL);

 	/* Make an XLOG entry describing the new MXID. */
 	xlrec.mid = multi;

@@ -1486,7 +1486,7 @@ retry:
 	LWLockRelease(lock);
 	CHECK_FOR_INTERRUPTS();

-	INJECTION_POINT("multixact-get-members-cv-sleep");
+	INJECTION_POINT("multixact-get-members-cv-sleep", NULL);

 	ConditionVariableSleep(&MultiXactState->nextoff_cv,
 						   WAIT_EVENT_MULTIXACT_CREATION);
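Sketch of the widened injection-point API these hunks adopt (assumed shape,
extension side): callbacks now receive an extra argument carrying whatever
the INJECTION_POINT() call site passed, which is NULL throughout this diff.
The callback name below is hypothetical.

	#ifdef USE_INJECTION_POINTS
	static void
	example_injection_callback(const char *name, const void *private_data,
							   void *arg)
	{
		/* arg comes from the call site, e.g. INJECTION_POINT("...", NULL) */
		elog(NOTICE, "injection point \"%s\" reached", name);
	}
	#endif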
@@ -7882,7 +7882,7 @@ CreateRestartPoint(int flags)
 	 * This location needs to be after CheckPointGuts() to ensure that some
 	 * work has already happened during this checkpoint.
 	 */
-	INJECTION_POINT("create-restart-point");
+	INJECTION_POINT("create-restart-point", NULL);

 	/*
 	 * Remember the prior checkpoint's redo ptr for
@@ -346,8 +346,6 @@ IsSharedRelation(Oid relationId)
 		relationId == PgDbRoleSettingToastIndex ||
 		relationId == PgParameterAclToastTable ||
 		relationId == PgParameterAclToastIndex ||
-		relationId == PgReplicationOriginToastTable ||
-		relationId == PgReplicationOriginToastIndex ||
 		relationId == PgShdescriptionToastTable ||
 		relationId == PgShdescriptionToastIndex ||
 		relationId == PgShseclabelToastTable ||
@@ -674,11 +674,6 @@ GRANT SELECT ON pg_backend_memory_contexts TO pg_read_all_stats;
 REVOKE EXECUTE ON FUNCTION pg_get_backend_memory_contexts() FROM PUBLIC;
 GRANT EXECUTE ON FUNCTION pg_get_backend_memory_contexts() TO pg_read_all_stats;

-REVOKE EXECUTE ON FUNCTION
-	pg_get_process_memory_contexts(integer, boolean, float) FROM PUBLIC;
-GRANT EXECUTE ON FUNCTION
-	pg_get_process_memory_contexts(integer, boolean, float) TO pg_read_all_stats;
-
 -- Statistics views

 CREATE VIEW pg_stat_all_tables AS
@@ -835,7 +835,7 @@ BeginCopyTo(ParseState *pstate,
 		((DR_copy *) dest)->cstate = cstate;

 		/* Create a QueryDesc requesting no output */
-		cstate->queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext,
+		cstate->queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
 											GetActiveSnapshot(),
 											InvalidSnapshot,
 											dest, NULL, NULL, 0);

@@ -845,8 +845,7 @@ BeginCopyTo(ParseState *pstate,
 		 *
 		 * ExecutorStart computes a result tupdesc for us
 		 */
-		if (!ExecutorStart(cstate->queryDesc, 0))
-			elog(ERROR, "ExecutorStart() failed unexpectedly");
+		ExecutorStart(cstate->queryDesc, 0);

 		tupDesc = cstate->queryDesc->tupDesc;
 	}
@@ -334,13 +334,12 @@ ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt,
 		UpdateActiveSnapshotCommandId();

 		/* Create a QueryDesc, redirecting output to our tuple receiver */
-		queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext,
+		queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
 									GetActiveSnapshot(), InvalidSnapshot,
 									dest, params, queryEnv, 0);

 		/* call ExecutorStart to prepare the plan for execution */
-		if (!ExecutorStart(queryDesc, GetIntoRelEFlags(into)))
-			elog(ERROR, "ExecutorStart() failed unexpectedly");
+		ExecutorStart(queryDesc, GetIntoRelEFlags(into));

 		/* run the plan to completion */
 		ExecutorRun(queryDesc, ForwardScanDirection, 0);
@@ -1065,16 +1065,41 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)

 	/* Check that the chosen locales are valid, and get canonical spellings */
 	if (!check_locale(LC_COLLATE, dbcollate, &canonname))
-		ereport(ERROR,
-				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
-				 errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+	{
+		if (dblocprovider == COLLPROVIDER_BUILTIN)
+			ereport(ERROR,
+					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+					 errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+					 errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+		else if (dblocprovider == COLLPROVIDER_ICU)
+			ereport(ERROR,
+					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+					 errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+					 errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+		else
+			ereport(ERROR,
+					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+					 errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate)));
+	}
 	dbcollate = canonname;
 	if (!check_locale(LC_CTYPE, dbctype, &canonname))
-		ereport(ERROR,
-				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-				 errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
-				 errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+	{
+		if (dblocprovider == COLLPROVIDER_BUILTIN)
+			ereport(ERROR,
+					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+					 errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+					 errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+		else if (dblocprovider == COLLPROVIDER_ICU)
+			ereport(ERROR,
+					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+					 errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+					 errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+		else
+			ereport(ERROR,
+					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
+					 errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype)));
+	}

 	dbctype = canonname;

 	check_encoding_locale_matches(encoding, dbcollate, dbctype);
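The hint now depends on the database's locale provider. A hedged restatement
of the dispatch added above, factored as a helper for clarity (hypothetical
function; the committed code open-codes this for both LC_COLLATE and
LC_CTYPE):

	static const char *
	locale_hint_for_provider(char dblocprovider)
	{
		if (dblocprovider == COLLPROVIDER_BUILTIN)
			return "If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.";
		if (dblocprovider == COLLPROVIDER_ICU)
			return "If the locale name is specific to the ICU provider, use ICU_LOCALE.";
		return NULL;			/* libc provider: no alternative hint applies */
	}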
@@ -369,8 +369,7 @@ standard_ExplainOneQuery(Query *query, int cursorOptions,
 	}

 	/* run it (if needed) and produce output */
-	ExplainOnePlan(plan, NULL, NULL, -1, into, es, queryString, params,
-				   queryEnv,
+	ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
 				   &planduration, (es->buffers ? &bufusage : NULL),
 				   es->memory ? &mem_counters : NULL);
 }

@@ -492,9 +491,7 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
 * to call it.
 */
void
-ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
-			   CachedPlanSource *plansource, int query_index,
-			   IntoClause *into, ExplainState *es,
+ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 			   const char *queryString, ParamListInfo params,
 			   QueryEnvironment *queryEnv, const instr_time *planduration,
 			   const BufferUsage *bufusage,

@@ -550,7 +547,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
 		dest = None_Receiver;

 	/* Create a QueryDesc for the query */
-	queryDesc = CreateQueryDesc(plannedstmt, cplan, queryString,
+	queryDesc = CreateQueryDesc(plannedstmt, queryString,
 								GetActiveSnapshot(), InvalidSnapshot,
 								dest, params, queryEnv, instrument_option);

@@ -564,17 +561,8 @@ ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
 	if (into)
 		eflags |= GetIntoRelEFlags(into);

-	/* Prepare the plan for execution. */
-	if (queryDesc->cplan)
-	{
-		ExecutorStartCachedPlan(queryDesc, eflags, plansource, query_index);
-		Assert(queryDesc->planstate);
-	}
-	else
-	{
-		if (!ExecutorStart(queryDesc, eflags))
-			elog(ERROR, "ExecutorStart() failed unexpectedly");
-	}
+	/* call ExecutorStart to prepare the plan for execution */
+	ExecutorStart(queryDesc, eflags);

 	/* Execute the plan for statistics if asked for */
 	if (es->analyze)

@@ -823,14 +811,10 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
 	 * the queryid in any of the EXPLAIN plans to keep stable the results
 	 * generated by regression test suites.
 	 */
-	if (es->verbose && queryDesc->plannedstmt->queryId != UINT64CONST(0) &&
+	if (es->verbose && queryDesc->plannedstmt->queryId != INT64CONST(0) &&
 		compute_query_id != COMPUTE_QUERY_ID_REGRESS)
 	{
-		/*
-		 * Output the queryid as an int64 rather than a uint64 so we match
-		 * what would be seen in the BIGINT pg_stat_statements.queryid column.
-		 */
-		ExplainPropertyInteger("Query Identifier", NULL, (int64)
+		ExplainPropertyInteger("Query Identifier", NULL,
 							   queryDesc->plannedstmt->queryId, es);
 	}
 }

@@ -1232,6 +1216,10 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
 			if (((ModifyTable *) plan)->exclRelRTI)
 				*rels_used = bms_add_member(*rels_used,
 											((ModifyTable *) plan)->exclRelRTI);
+			/* Ensure Vars used in RETURNING will have refnames */
+			if (plan->targetlist)
+				*rels_used = bms_add_member(*rels_used,
+											linitial_int(((ModifyTable *) plan)->resultRelations));
 			break;
 		case T_Append:
 			*rels_used = bms_add_members(*rels_used,
@@ -993,13 +993,11 @@ execute_sql_string(const char *sql, const char *filename)
 			QueryDesc  *qdesc;

 			qdesc = CreateQueryDesc(stmt,
-									NULL,
 									sql,
 									GetActiveSnapshot(), NULL,
 									dest, NULL, NULL, 0);

-			if (!ExecutorStart(qdesc, 0))
-				elog(ERROR, "ExecutorStart() failed unexpectedly");
+			ExecutorStart(qdesc, 0);
 			ExecutorRun(qdesc, ForwardScanDirection, 0);
 			ExecutorFinish(qdesc);
 			ExecutorEnd(qdesc);
@@ -71,15 +71,26 @@ optionListToArray(List *options)
 	foreach(cell, options)
 	{
 		DefElem    *def = lfirst(cell);
+		const char *name;
 		const char *value;
 		Size		len;
 		text	   *t;

+		name = def->defname;
 		value = defGetString(def);
-		len = VARHDRSZ + strlen(def->defname) + 1 + strlen(value);
+
+		/* Insist that name not contain "=", else "a=b=c" is ambiguous */
+		if (strchr(name, '=') != NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+					 errmsg("invalid option name \"%s\": must not contain \"=\"",
+							name)));
+
+		len = VARHDRSZ + strlen(name) + 1 + strlen(value);
 		/* +1 leaves room for sprintf's trailing null */
 		t = palloc(len + 1);
 		SET_VARSIZE(t, len);
-		sprintf(VARDATA(t), "%s=%s", def->defname, value);
+		sprintf(VARDATA(t), "%s=%s", name, value);

 		astate = accumArrayResult(astate, PointerGetDatum(t),
 								  false, TEXTOID,
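Why "=" must be rejected in option names (illustrative fragment, not from
the diff): each option is stored as a single "name=value" text datum, and
consumers split it at the first '='. If names could contain '=', the stored
form would be ambiguous.

	const char *stored = "a=b=c";	/* ("a", "b=c") or ("a=b", "c")? */
	const char *sep = strchr(stored, '=');	/* always finds the first '=' */
	/* Readers recover name = "a", value = "b=c"; an option actually named
	 * "a=b" would silently change meaning, hence the new ereport(ERROR). */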
@@ -3892,9 +3892,9 @@ ReindexRelationConcurrently(const ReindexStmt *stmt, Oid relationOid, const Rein

 #ifdef USE_INJECTION_POINTS
 		if (idx->safe)
-			INJECTION_POINT("reindex-conc-index-safe");
+			INJECTION_POINT("reindex-conc-index-safe", NULL);
 		else
-			INJECTION_POINT("reindex-conc-index-not-safe");
+			INJECTION_POINT("reindex-conc-index-not-safe", NULL);
 #endif

 		idx->tableId = RelationGetRelid(heapRel);

@@ -4226,7 +4226,7 @@ ReindexRelationConcurrently(const ReindexStmt *stmt, Oid relationOid, const Rein
 										 false);

 		/*
-		 * Updating pg_index might involve TOAST table access, so ensure we
+		 * Swapping the indexes might involve TOAST table access, so ensure we
 		 * have a valid snapshot.
 		 */
 		PushActiveSnapshot(GetTransactionSnapshot());
@@ -438,13 +438,12 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,
 	UpdateActiveSnapshotCommandId();

 	/* Create a QueryDesc, redirecting output to our tuple receiver */
-	queryDesc = CreateQueryDesc(plan, NULL, queryString,
+	queryDesc = CreateQueryDesc(plan, queryString,
 								GetActiveSnapshot(), InvalidSnapshot,
 								dest, NULL, NULL, 0);

 	/* call ExecutorStart to prepare the plan for execution */
-	if (!ExecutorStart(queryDesc, 0))
-		elog(ERROR, "ExecutorStart() failed unexpectedly");
+	ExecutorStart(queryDesc, 0);

 	/* run the plan */
 	ExecutorRun(queryDesc, ForwardScanDirection, 0);
@@ -117,7 +117,6 @@ PerformCursorOpen(ParseState *pstate, DeclareCursorStmt *cstmt, ParamListInfo pa
 					  queryString,
 					  CMDTAG_SELECT,	/* cursor's query is always a SELECT */
 					  list_make1(plan),
-					  NULL,
 					  NULL);

 	/*----------
@@ -205,8 +205,7 @@ ExecuteQuery(ParseState *pstate,
 					  query_string,
 					  entry->plansource->commandTag,
 					  plan_list,
-					  cplan,
-					  entry->plansource);
+					  cplan);

 	/*
 	 * For CREATE TABLE ... AS EXECUTE, we must verify that the prepared

@@ -586,7 +585,6 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 	MemoryContextCounters mem_counters;
 	MemoryContext planner_ctx = NULL;
 	MemoryContext saved_ctx = NULL;
-	int			query_index = 0;

 	if (es->memory)
 	{

@@ -659,8 +657,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 		PlannedStmt *pstmt = lfirst_node(PlannedStmt, p);

 		if (pstmt->commandType != CMD_UTILITY)
-			ExplainOnePlan(pstmt, cplan, entry->plansource, query_index,
-						   into, es, query_string, paramLI, pstate->p_queryEnv,
+			ExplainOnePlan(pstmt, into, es, query_string, paramLI, pstate->p_queryEnv,
 						   &planduration, (es->buffers ? &bufusage : NULL),
 						   es->memory ? &mem_counters : NULL);
 		else

@@ -671,8 +668,6 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 		/* Separate plans with an appropriate separator */
 		if (lnext(plan_list, p) != NULL)
 			ExplainSeparatePlans(es);
-
-		query_index++;
 	}

 	if (estate)
@@ -430,8 +430,8 @@ static void AlterConstrUpdateConstraintEntry(ATAlterConstraint *cmdcon, Relation
 static ObjectAddress ATExecValidateConstraint(List **wqueue,
 											  Relation rel, char *constrName,
 											  bool recurse, bool recursing, LOCKMODE lockmode);
-static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
-										HeapTuple contuple, LOCKMODE lockmode);
+static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+										Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode);
 static void QueueCheckConstraintValidation(List **wqueue, Relation conrel, Relation rel,
 										   char *constrName, HeapTuple contuple,
 										   bool recurse, bool recursing, LOCKMODE lockmode);

@@ -11858,6 +11858,7 @@ AttachPartitionForeignKey(List **wqueue,
 	if (queueValidation)
 	{
 		Relation	conrel;
+		Oid			confrelid;

 		conrel = table_open(ConstraintRelationId, RowExclusiveLock);

@@ -11865,9 +11866,11 @@ AttachPartitionForeignKey(List **wqueue,
 		if (!HeapTupleIsValid(partcontup))
 			elog(ERROR, "cache lookup failed for constraint %u", partConstrOid);

+		confrelid = ((Form_pg_constraint) GETSTRUCT(partcontup))->confrelid;
+
 		/* Use the same lock as for AT_ValidateConstraint */
-		QueueFKConstraintValidation(wqueue, conrel, partition, partcontup,
-									ShareUpdateExclusiveLock);
+		QueueFKConstraintValidation(wqueue, conrel, partition, confrelid,
+									partcontup, ShareUpdateExclusiveLock);
 		ReleaseSysCache(partcontup);
 		table_close(conrel, RowExclusiveLock);
 	}

@@ -12463,9 +12466,12 @@ ATExecAlterConstrEnforceability(List **wqueue, ATAlterConstraint *cmdcon,

 	/*
 	 * Tell Phase 3 to check that the constraint is satisfied by existing
-	 * rows.
+	 * rows. Only applies to leaf partitions, and (for constraints that
+	 * reference a partitioned table) only if this is not one of the
+	 * pg_constraint rows that exist solely to support action triggers.
 	 */
-	if (rel->rd_rel->relkind == RELKIND_RELATION)
+	if (rel->rd_rel->relkind == RELKIND_RELATION &&
+		currcon->confrelid == pkrelid)
 	{
 		AlteredTableInfo *tab;
 		NewConstraint *newcon;

@@ -12919,7 +12925,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
 	{
 		if (con->contype == CONSTRAINT_FOREIGN)
 		{
-			QueueFKConstraintValidation(wqueue, conrel, rel, tuple, lockmode);
+			QueueFKConstraintValidation(wqueue, conrel, rel, con->confrelid,
+										tuple, lockmode);
 		}
 		else if (con->contype == CONSTRAINT_CHECK)
 		{

@@ -12952,8 +12959,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
 * for the specified relation and all its children.
 */
static void
-QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
-							HeapTuple contuple, LOCKMODE lockmode)
+QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+							Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode)
 {
 	Form_pg_constraint con;
 	AlteredTableInfo *tab;

@@ -12964,7 +12971,17 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
 	Assert(con->contype == CONSTRAINT_FOREIGN);
 	Assert(!con->convalidated);

-	if (rel->rd_rel->relkind == RELKIND_RELATION)
+	/*
+	 * Add the validation to phase 3's queue; not needed for partitioned
+	 * tables themselves, only for their partitions.
+	 *
+	 * When the referenced table (pkrelid) is partitioned, the referencing
+	 * table (fkrel) has one pg_constraint row pointing to each partition
+	 * thereof. These rows are there only to support action triggers and no
+	 * table scan is needed, therefore skip this for them as well.
+	 */
+	if (fkrel->rd_rel->relkind == RELKIND_RELATION &&
+		con->confrelid == pkrelid)
 	{
 		NewConstraint *newcon;
 		Constraint *fkconstraint;

@@ -12983,15 +13000,16 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
 		newcon->qual = (Node *) fkconstraint;

 		/* Find or create work queue entry for this table */
-		tab = ATGetQueueEntry(wqueue, rel);
+		tab = ATGetQueueEntry(wqueue, fkrel);
 		tab->constraints = lappend(tab->constraints, newcon);
 	}

 	/*
 	 * If the table at either end of the constraint is partitioned, we need to
-	 * recurse and handle every constraint that is a child of this constraint.
+	 * recurse and handle every unvalidate constraint that is a child of this
+	 * constraint.
 	 */
-	if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
+	if (fkrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
 		get_rel_relkind(con->confrelid) == RELKIND_PARTITIONED_TABLE)
 	{
 		ScanKeyData pkey;

@@ -13023,8 +13041,12 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,

 			childrel = table_open(childcon->conrelid, lockmode);

-			QueueFKConstraintValidation(wqueue, conrel, childrel, childtup,
-										lockmode);
+			/*
+			 * NB: Note that pkrelid should be passed as-is during recursion,
+			 * as it is required to identify the root referenced table.
+			 */
+			QueueFKConstraintValidation(wqueue, conrel, childrel, pkrelid,
+										childtup, lockmode);
 			table_close(childrel, NoLock);
 		}

@@ -13032,7 +13054,11 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
 	}

 	/*
-	 * Now update the catalog, while we have the door open.
+	 * Now mark the pg_constraint row as validated (even if we didn't check,
+	 * notably the ones for partitions on the referenced side).
+	 *
+	 * We rely on transaction abort to roll back this change if phase 3
+	 * ultimately finds violating rows. This is a bit ugly.
 	 */
 	copyTuple = heap_copytuple(contuple);
 	copy_con = (Form_pg_constraint) GETSTRUCT(copyTuple);

@@ -20964,9 +20990,17 @@ ATExecDetachPartition(List **wqueue, AlteredTableInfo *tab, Relation rel,
 		tab->rel = rel;
 	}

+	/*
+	 * Detaching the partition might involve TOAST table access, so ensure we
+	 * have a valid snapshot.
+	 */
+	PushActiveSnapshot(GetTransactionSnapshot());
+
 	/* Do the final part of detaching */
 	DetachPartitionFinalize(rel, partRel, concurrent, defaultPartOid);

+	PopActiveSnapshot();
+
 	ObjectAddressSet(address, RelationRelationId, RelationGetRelid(partRel));

 	/* keep our lock until commit */
@@ -5057,21 +5057,6 @@ AfterTriggerBeginQuery(void)
 }


-/* ----------
- * AfterTriggerAbortQuery()
- *
- *	Called by standard_ExecutorEnd() if the query execution was aborted due to
- *	the plan becoming invalid during initialization.
- * ----------
- */
-void
-AfterTriggerAbortQuery(void)
-{
-	/* Revert the actions of AfterTriggerBeginQuery(). */
-	afterTriggers.query_depth--;
-}
-
-
 /* ----------
  * AfterTriggerEndQuery()
  *
@@ -63,7 +63,7 @@ typedef struct PVShared
 	 */
 	Oid			relid;
 	int			elevel;
-	uint64		queryid;
+	int64		queryid;

 	/*
 	 * Fields for both index vacuum and cleanup.
@@ -285,28 +285,6 @@ are typically reset to empty once per tuple. Per-tuple contexts are usually
 associated with ExprContexts, and commonly each PlanState node has its own
 ExprContext to evaluate its qual and targetlist expressions in.

-Relation Locking
-----------------
-
-When the executor initializes a plan tree for execution, it doesn't lock
-non-index relations if the plan tree is freshly generated and not derived
-from a CachedPlan. This is because such locks have already been established
-during the query's parsing, rewriting, and planning phases. However, with a
-cached plan tree, some relations may remain unlocked. The function
-AcquireExecutorLocks() only locks unprunable relations in the plan, deferring
-the locking of prunable ones to executor initialization. This avoids
-unnecessary locking of relations that will be pruned during "initial" runtime
-pruning in ExecDoInitialPruning().
-
-This approach creates a window where a cached plan tree with child tables
-could become outdated if another backend modifies these tables before
-ExecDoInitialPruning() locks them. As a result, the executor has the added duty
-to verify the plan tree's validity whenever it locks a child table after
-doing initial pruning. This validation is done by checking the CachedPlan.is_valid
-flag. If the plan tree is outdated (is_valid = false), the executor stops
-further initialization, cleans up anything in EState that would have been
-allocated up to that point, and retries execution after recreating the
-invalid plan in the CachedPlan. See ExecutorStartCachedPlan().
-
 Query Processing Control Flow
 -----------------------------

@@ -315,13 +293,11 @@ This is a sketch of control flow for full query processing:

 	CreateQueryDesc

-	ExecutorStart or ExecutorStartCachedPlan
+	ExecutorStart
 		CreateExecutorState
 			creates per-query context
-		switch to per-query context to run ExecDoInitialPruning and ExecInitNode
+		switch to per-query context to run ExecInitNode
 		AfterTriggerBeginQuery
-		ExecDoInitialPruning
-			does initial pruning and locks surviving partitions if needed
 		ExecInitNode --- recursively scans plan tree
 			ExecInitNode
 				recurse into subsidiary nodes

@@ -345,12 +321,7 @@ This is a sketch of control flow for full query processing:

 	FreeQueryDesc

-As mentioned in the "Relation Locking" section, if the plan tree is found to
-be stale after locking partitions in ExecDoInitialPruning(), the control is
-immediately returned to ExecutorStartCachedPlan(), which will create a new plan
-tree and perform the steps starting from CreateExecutorState() again.
-
-Per above comments, it's not really critical for ExecEndPlan to free any
+Per above comments, it's not really critical for ExecEndNode to free any
 memory; it'll all go away in FreeExecutorState anyway. However, we do need to
 be careful to close relations, drop buffer pins, etc, so we do need to scan
 the plan state tree to find these sorts of resources.
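For reference, the README's post-revert control flow rendered as a minimal C
caller (a sketch assuming a previously built PlannedStmt in "plan", an active
snapshot, and a DestReceiver in "dest"; error handling omitted):

	QueryDesc  *qd = CreateQueryDesc(plan, query_string,
									 GetActiveSnapshot(), InvalidSnapshot,
									 dest, NULL, NULL, 0);

	ExecutorStart(qd, 0);		/* CreateExecutorState + ExecInitNode */
	ExecutorRun(qd, ForwardScanDirection, 0);	/* ExecutePlan loop */
	ExecutorFinish(qd);			/* ExecPostprocessPlan + AFTER triggers */
	ExecutorEnd(qd);			/* ExecEndPlan + FreeExecutorState */
	FreeQueryDesc(qd);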
@@ -55,13 +55,11 @@
 #include "parser/parse_relation.h"
 #include "pgstat.h"
 #include "rewrite/rewriteHandler.h"
-#include "storage/lmgr.h"
 #include "tcop/utility.h"
 #include "utils/acl.h"
 #include "utils/backend_status.h"
 #include "utils/lsyscache.h"
 #include "utils/partcache.h"
-#include "utils/plancache.h"
 #include "utils/rls.h"
 #include "utils/snapmgr.h"

@@ -119,16 +117,11 @@ static void ReportNotNullViolationError(ResultRelInfo *resultRelInfo,
 *		get control when ExecutorStart is called. Such a plugin would
 *		normally call standard_ExecutorStart().
 *
-*		Return value indicates if the plan has been initialized successfully so
-*		that queryDesc->planstate contains a valid PlanState tree. It may not
-*		if the plan got invalidated during InitPlan().
 * ----------------------------------------------------------------
 */
-bool
+void
 ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
-	bool		plan_valid;
-
 	/*
 	 * In some cases (e.g. an EXECUTE statement or an execute message with the
 	 * extended query protocol) the query_id won't be reported, so do it now.

@@ -140,14 +133,12 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
 	pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);

 	if (ExecutorStart_hook)
-		plan_valid = (*ExecutorStart_hook) (queryDesc, eflags);
+		(*ExecutorStart_hook) (queryDesc, eflags);
 	else
-		plan_valid = standard_ExecutorStart(queryDesc, eflags);
-
-	return plan_valid;
+		standard_ExecutorStart(queryDesc, eflags);
 }

-bool
+void
 standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
 {
 	EState	   *estate;

@@ -271,64 +262,6 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
 	InitPlan(queryDesc, eflags);

 	MemoryContextSwitchTo(oldcontext);
-
-	return ExecPlanStillValid(queryDesc->estate);
 }

-/*
- * ExecutorStartCachedPlan
- *		Start execution for a given query in the CachedPlanSource, replanning
- *		if the plan is invalidated due to deferred locks taken during the
- *		plan's initialization
- *
- * This function handles cases where the CachedPlan given in queryDesc->cplan
- * might become invalid during the initialization of the plan given in
- * queryDesc->plannedstmt, particularly when prunable relations in it are
- * locked after performing initial pruning. If the locks invalidate the plan,
- * the function calls UpdateCachedPlan() to replan all queries in the
- * CachedPlan, and then retries initialization.
- *
- * The function repeats the process until ExecutorStart() successfully
- * initializes the plan, that is without the CachedPlan becoming invalid.
- */
-void
-ExecutorStartCachedPlan(QueryDesc *queryDesc, int eflags,
-						CachedPlanSource *plansource,
-						int query_index)
-{
-	if (unlikely(queryDesc->cplan == NULL))
-		elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlan");
-	if (unlikely(plansource == NULL))
-		elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlanSource");
-
-	/*
-	 * Loop and retry with an updated plan until no further invalidation
-	 * occurs.
-	 */
-	while (1)
-	{
-		if (!ExecutorStart(queryDesc, eflags))
-		{
-			/*
-			 * Clean up the current execution state before creating the new
-			 * plan to retry ExecutorStart(). Mark execution as aborted to
-			 * ensure that AFTER trigger state is properly reset.
-			 */
-			queryDesc->estate->es_aborted = true;
-			ExecutorEnd(queryDesc);
-
-			/* Retry ExecutorStart() with an updated plan tree. */
-			queryDesc->plannedstmt = UpdateCachedPlan(plansource, query_index,
-													  queryDesc->queryEnv);
-		}
-		else
-
-			/*
-			 * Exit the loop if the plan is initialized successfully and no
-			 * sinval messages were received that invalidated the CachedPlan.
-			 */
-			break;
-	}
-}

 /* ----------------------------------------------------------------

@@ -387,7 +320,6 @@ standard_ExecutorRun(QueryDesc *queryDesc,
 	estate = queryDesc->estate;

 	Assert(estate != NULL);
-	Assert(!estate->es_aborted);
 	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

 	/* caller must ensure the query's snapshot is active */

@@ -494,11 +426,8 @@ standard_ExecutorFinish(QueryDesc *queryDesc)
 	Assert(estate != NULL);
 	Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

-	/*
-	 * This should be run once and only once per Executor instance and never
-	 * if the execution was aborted.
-	 */
-	Assert(!estate->es_finished && !estate->es_aborted);
+	/* This should be run once and only once per Executor instance */
+	Assert(!estate->es_finished);

 	/* Switch into per-query memory context */
 	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

@@ -561,10 +490,11 @@ standard_ExecutorEnd(QueryDesc *queryDesc)
 						 (PgStat_Counter) estate->es_parallel_workers_launched);

 	/*
-	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode or if
-	 * execution was aborted.
+	 * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+	 * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+	 * might forget to call it.
 	 */
-	Assert(estate->es_finished || estate->es_aborted ||
+	Assert(estate->es_finished ||
 		   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));

 	/*

@@ -578,14 +508,6 @@ standard_ExecutorEnd(QueryDesc *queryDesc)
 	UnregisterSnapshot(estate->es_snapshot);
 	UnregisterSnapshot(estate->es_crosscheck_snapshot);

-	/*
-	 * Reset AFTER trigger module if the query execution was aborted.
-	 */
-	if (estate->es_aborted &&
-		!(estate->es_top_eflags &
-		  (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
-		AfterTriggerAbortQuery();
-
 	/*
 	 * Must switch out of context before destroying it
 	 */

@@ -684,21 +606,6 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos,
 			   (rte->rtekind == RTE_SUBQUERY &&
 				rte->relkind == RELKIND_VIEW));

-		/*
-		 * Ensure that we have at least an AccessShareLock on relations
-		 * whose permissions need to be checked.
-		 *
-		 * Skip this check in a parallel worker because locks won't be
-		 * taken until ExecInitNode() performs plan initialization.
-		 *
-		 * XXX: ExecCheckPermissions() in a parallel worker may be
-		 * redundant with the checks done in the leader process, so this
-		 * should be reviewed to ensure it's necessary.
-		 */
-		Assert(IsParallelWorker() ||
-			   CheckRelationOidLockedByMe(rte->relid, AccessShareLock,
-										  true));
-
 		(void) getRTEPermissionInfo(rteperminfos, rte);
 		/* Many-to-one mapping not allowed */
 		Assert(!bms_is_member(rte->perminfoindex, indexset));

@@ -924,12 +831,6 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
 *
 *		Initializes the query plan: open files, allocate storage
 *		and start up the rule manager
-*
-*		If the plan originates from a CachedPlan (given in queryDesc->cplan),
-*		it can become invalid during runtime "initial" pruning when the
-*		remaining set of locks is taken. The function returns early in that
-*		case without initializing the plan, and the caller is expected to
-*		retry with a new valid plan.
 * ----------------------------------------------------------------
 */
 static void

@@ -937,7 +838,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 {
 	CmdType		operation = queryDesc->operation;
 	PlannedStmt *plannedstmt = queryDesc->plannedstmt;
-	CachedPlan *cachedplan = queryDesc->cplan;
 	Plan	   *plan = plannedstmt->planTree;
 	List	   *rangeTable = plannedstmt->rtable;
 	EState	   *estate = queryDesc->estate;

@@ -958,7 +858,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 					  bms_copy(plannedstmt->unprunableRelids));

 	estate->es_plannedstmt = plannedstmt;
-	estate->es_cachedplan = cachedplan;
 	estate->es_part_prune_infos = plannedstmt->partPruneInfos;

 	/*

@@ -972,9 +871,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 	 */
 	ExecDoInitialPruning(estate);

-	if (!ExecPlanStillValid(estate))
-		return;
-
 	/*
 	 * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
 	 */

@@ -3092,9 +2988,6 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
 	 * the snapshot, rangetable, and external Param info. They need their own
 	 * copies of local state, including a tuple table, es_param_exec_vals,
 	 * result-rel info, etc.
-	 *
-	 * es_cachedplan is not copied because EPQ plan execution does not acquire
-	 * any new locks that could invalidate the CachedPlan.
 	 */
 	rcestate->es_direction = ForwardScanDirection;
 	rcestate->es_snapshot = parentestate->es_snapshot;
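With ExecutorStart() returning void again, an extension hook no longer
propagates a validity flag. A hedged sketch of the conventional hook pattern
under the restored signature (prev_ExecutorStart is a hypothetical saved
hook pointer, not part of this diff):

	static ExecutorStart_hook_type prev_ExecutorStart = NULL;

	static void
	my_ExecutorStart(QueryDesc *queryDesc, int eflags)
	{
		/* extension-specific setup could go here */
		if (prev_ExecutorStart)
			prev_ExecutorStart(queryDesc, eflags);
		else
			standard_ExecutorStart(queryDesc, eflags);
	}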
@@ -1278,15 +1278,8 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
 	paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
 	paramLI = RestoreParamList(&paramspace);

-	/*
-	 * Create a QueryDesc for the query. We pass NULL for cachedplan, because
-	 * we don't have a pointer to the CachedPlan in the leader's process. It's
-	 * fine because the only reason the executor needs to see it is to decide
-	 * if it should take locks on certain relations, but parallel workers
-	 * always take locks anyway.
-	 */
+	/* Create a QueryDesc for the query. */
 	return CreateQueryDesc(pstmt,
-						   NULL,
 						   queryString,
 						   GetActiveSnapshot(), InvalidSnapshot,
 						   receiver, paramLI, NULL, instrument_options);

@@ -1471,8 +1464,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)

 	/* Start up the executor */
 	queryDesc->plannedstmt->jitFlags = fpes->jit_flags;
-	if (!ExecutorStart(queryDesc, fpes->eflags))
-		elog(ERROR, "ExecutorStart() failed unexpectedly");
+	ExecutorStart(queryDesc, fpes->eflags);

 	/* Special executor initialization steps for parallel workers */
 	queryDesc->planstate->state->es_query_dsa = area;
@@ -26,7 +26,6 @@
 #include "partitioning/partdesc.h"
 #include "partitioning/partprune.h"
 #include "rewrite/rewriteManip.h"
-#include "storage/lmgr.h"
 #include "utils/acl.h"
 #include "utils/lsyscache.h"
 #include "utils/partcache.h"

@@ -1771,8 +1770,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
 * ExecDoInitialPruning:
 *		Perform runtime "initial" pruning, if necessary, to determine the set
 *		of child subnodes that need to be initialized during ExecInitNode() for
-*		all plan nodes that contain a PartitionPruneInfo. This also locks the
-*		leaf partitions whose subnodes will be initialized if needed.
+*		all plan nodes that contain a PartitionPruneInfo.
 *
 * ExecInitPartitionExecPruning:
 *		Updates the PartitionPruneState found at given part_prune_index in

@@ -1798,8 +1796,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
 * ExecDoInitialPruning
 *		Perform runtime "initial" pruning, if necessary, to determine the set
 *		of child subnodes that need to be initialized during ExecInitNode() for
-*		plan nodes that support partition pruning. This also locks the leaf
-*		partitions whose subnodes will be initialized if needed.
+*		plan nodes that support partition pruning.
 *
 * This function iterates over each PartitionPruneInfo entry in
 * estate->es_part_prune_infos. For each entry, it creates a PartitionPruneState

@@ -1821,9 +1818,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
 void
 ExecDoInitialPruning(EState *estate)
 {
-	PlannedStmt *stmt = estate->es_plannedstmt;
 	ListCell   *lc;
-	List	   *locked_relids = NIL;

 	foreach(lc, estate->es_part_prune_infos)
 	{

@@ -1849,68 +1844,11 @@ ExecDoInitialPruning(EState *estate)
 		else
 			validsubplan_rtis = all_leafpart_rtis;

-		if (ExecShouldLockRelations(estate))
-		{
-			int			rtindex = -1;
-
-			while ((rtindex = bms_next_member(validsubplan_rtis,
-											  rtindex)) >= 0)
-			{
-				RangeTblEntry *rte = exec_rt_fetch(rtindex, estate);
-
-				Assert(rte->rtekind == RTE_RELATION &&
-					   rte->rellockmode != NoLock);
-				LockRelationOid(rte->relid, rte->rellockmode);
-				locked_relids = lappend_int(locked_relids, rtindex);
-			}
-		}
 		estate->es_unpruned_relids = bms_add_members(estate->es_unpruned_relids,
 													 validsubplan_rtis);
 		estate->es_part_prune_results = lappend(estate->es_part_prune_results,
 												validsubplans);
 	}
-
-	/*
-	 * Lock the first result relation of each ModifyTable node, even if it was
-	 * pruned. This is required for ExecInitModifyTable(), which keeps its
-	 * first result relation if all other result relations have been pruned,
-	 * because some executor paths (e.g., in nodeModifyTable.c and
-	 * execPartition.c) rely on there being at least one result relation.
-	 *
-	 * There's room for improvement here --- we actually only need to do this
-	 * if all other result relations of the ModifyTable node were pruned, but
-	 * we don't have an easy way to tell that here.
-	 */
-	if (stmt->resultRelations && ExecShouldLockRelations(estate))
-	{
-		foreach(lc, stmt->firstResultRels)
-		{
-			Index		firstResultRel = lfirst_int(lc);
-
-			if (!bms_is_member(firstResultRel, estate->es_unpruned_relids))
-			{
-				RangeTblEntry *rte = exec_rt_fetch(firstResultRel, estate);
-
-				Assert(rte->rtekind == RTE_RELATION && rte->rellockmode != NoLock);
-				LockRelationOid(rte->relid, rte->rellockmode);
-				locked_relids = lappend_int(locked_relids, firstResultRel);
-			}
-		}
-	}
-
-	/*
-	 * Release the useless locks if the plan won't be executed. This is the
-	 * same as what CheckCachedPlan() in plancache.c does.
-	 */
-	if (!ExecPlanStillValid(estate))
-	{
-		foreach(lc, locked_relids)
-		{
-			RangeTblEntry *rte = exec_rt_fetch(lfirst_int(lc), estate);
-
-			UnlockRelationOid(rte->relid, rte->rellockmode);
-		}
-	}
 }

 /*
|
@ -147,7 +147,6 @@ CreateExecutorState(void)
|
||||
estate->es_top_eflags = 0;
|
||||
estate->es_instrument = 0;
|
||||
estate->es_finished = false;
|
||||
estate->es_aborted = false;
|
||||
|
||||
estate->es_exprcontexts = NIL;
|
||||
|
||||
|
@@ -34,6 +34,7 @@
 #include "utils/funccache.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
+#include "utils/plancache.h"
 #include "utils/snapmgr.h"
 #include "utils/syscache.h"

@@ -1338,7 +1339,6 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
 		dest = None_Receiver;

 	es->qd = CreateQueryDesc(es->stmt,
-							 NULL,
 							 fcache->func->src,
 							 GetActiveSnapshot(),
 							 InvalidSnapshot,

@@ -1363,8 +1363,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
 			eflags = EXEC_FLAG_SKIP_TRIGGERS;
 		else
 			eflags = 0;			/* default run-to-completion flags */
-		if (!ExecutorStart(es->qd, eflags))
-			elog(ERROR, "ExecutorStart() failed unexpectedly");
+		ExecutorStart(es->qd, eflags);
 	}

 	es->status = F_EXEC_RUN;
Some files were not shown because too many files have changed in this diff.