Fix typos in comments.

Backpatch to all supported versions, where applicable, to make
backpatching of future fixes go more smoothly.

Josh Soref

Discussion: https://www.postgresql.org/message-id/CACZqfqCf+5qRztLPgmmosr-B0Ye4srWzzw_mo4c_8_B_mtjmJQ@mail.gmail.com
commit 90e8599219
parent b971a98cea

configure
@@ -7094,7 +7094,7 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
 test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
 
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
 *install-sh*) install_bin='';;
@@ -7238,7 +7238,7 @@ fi
 $as_echo "$MKDIR_P" >&6; }
 
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
 *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
@@ -894,7 +894,7 @@ fi
 
 AC_PROG_INSTALL
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
 *install-sh*) install_bin='';;
@@ -907,7 +907,7 @@ AC_PROG_LN_S
 AC_PROG_AWK
 AC_PROG_MKDIR_P
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
 *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
@@ -51,7 +51,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 initBloomState(&state, index);
 
 /*
-* Interate over the pages. We don't care about concurrently added pages,
+* Iterate over the pages. We don't care about concurrently added pages,
 * they can't contain tuples to delete.
 */
 npages = RelationGetNumberOfBlocks(index);
@@ -1019,7 +1019,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ll_coord
@@ -1075,7 +1075,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
 0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ur_coord
@@ -1019,7 +1019,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ll_coord
@@ -1075,7 +1075,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
 0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ur_coord
@@ -1019,7 +1019,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ll_coord
@@ -1075,7 +1075,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
 0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ur_coord
@@ -1019,7 +1019,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 5
 (1 row)
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ll_coord
@@ -1075,7 +1075,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3);
 0
 (1 row)
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 cube_ur_coord
@@ -246,7 +246,7 @@ SELECT cube_dim('(0,0,0)'::cube);
 SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
 SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
@@ -258,7 +258,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 1);
 SELECT cube_ll_coord('(42,137)'::cube, 2);
 SELECT cube_ll_coord('(42,137)'::cube, 3);
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
@@ -11,7 +11,7 @@ CREATE FUNCTION earth() RETURNS float8
 LANGUAGE SQL IMMUTABLE PARALLEL SAFE
 AS 'SELECT ''6378168''::float8';
 
--- Astromers may want to change the earth function so that distances will be
+-- Astronomers may want to change the earth function so that distances will be
 -- returned in degrees. To do this comment out the above definition and
 -- uncomment the one below. Note that doing this will break the regression
 -- tests.
@@ -23,7 +23,7 @@
 * Product 9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103
 * 103 / 10 = 10 remainder 3
 * Check digit 10 - 3 = 7
-* => 977-1144875-00-7 ?? <- suplemental number (number of the week, month, etc.)
+* => 977-1144875-00-7 ?? <- supplemental number (number of the week, month, etc.)
 * ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...)
 *
 * The hyphenation is always in after the four digits of the ISSN code.
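The comment in the hunk above walks through the standard EAN-13 weighted-sum check digit as applied to an ISSN-derived number (977 prefix): digits in odd positions get weight 1, digits in even positions get weight 3, and the check digit brings the sum up to the next multiple of 10. As a minimal standalone sketch of just that arithmetic (an illustration, not the contrib/isn implementation itself):

#include <stdio.h>

/* Compute the EAN-13 check digit for the 12 leading digits:
 * odd positions weight 1, even positions weight 3; the check digit
 * rounds the weighted sum up to the next multiple of 10. */
static int
ean13_check_digit(const char *digits12)
{
	int		sum = 0;
	int		i;

	for (i = 0; i < 12; i++)
		sum += (digits12[i] - '0') * ((i % 2 == 0) ? 1 : 3);

	return (10 - (sum % 10)) % 10;
}

int
main(void)
{
	/* 977 1144875 00 -> weighted sum 103, remainder 3, check digit 7 */
	printf("%d\n", ean13_check_digit("977114487500"));
	return 0;
}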
@@ -160,7 +160,7 @@ dehyphenate(char *bufO, char *bufI)
 * into bufO using the given hyphenation range TABLE.
 * Assumes the input string to be used is of only digits.
 *
-* Returns the number of characters acctually hyphenated.
+* Returns the number of characters actually hyphenated.
 */
 static unsigned
 hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2])
@@ -748,7 +748,7 @@ string2ean(const char *str, bool errorOK, ean13 *result,
 }
 else if (*aux2 == '!' && *(aux2 + 1) == '\0')
 {
-/* the invalid check digit sufix was found, set it */
+/* the invalid check digit suffix was found, set it */
 if (!magic)
 valid = false;
 magic = true;
@@ -1105,7 +1105,7 @@ SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
 t
 (1 row)
 
---exractors
+--extractors
 SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
 ?column?
 ----------
@@ -197,7 +197,7 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag)
 
 #define STACKDEPTH 32
 /*
-* make polish notaion of query
+* make polish notation of query
 */
 static int32
 makepol(QPRS_STATE *state)
@@ -204,7 +204,7 @@ SELECT 'a.b.c.d.e'::ltree ? '{A.b.c.d.e, a.*}';
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}';
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
 
---exractors
+--extractors
 SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
 SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4';
 SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3';
@@ -779,7 +779,7 @@ main(int argc, char **argv)
 {
 /*
 * Once we have restored this file successfully we can remove some
-* prior WAL files. If this restore fails we musn't remove any
+* prior WAL files. If this restore fails we mustn't remove any
 * file because some of them will be requested again immediately
 * after the failed restore, or when we restart recovery.
 */
@@ -138,7 +138,7 @@ typedef struct Counters
 {
 int64 calls; /* # of times executed */
 double total_time; /* total execution time, in msec */
-double min_time; /* minimim execution time in msec */
+double min_time; /* minimum execution time in msec */
 double max_time; /* maximum execution time in msec */
 double mean_time; /* mean execution time in msec */
 double sum_var_time; /* sum of variances in execution time in msec */
@@ -413,7 +413,7 @@ comp_ptrgm(const void *v1, const void *v2)
 * ulen1: count of unique trigrams of array "trg1".
 * len2: length of array "trg2" and array "trg2indexes".
 * len: length of the array "found".
-* check_only: if true then only check existaince of similar search pattern in
+* check_only: if true then only check existence of similar search pattern in
 * text.
 *
 * Returns word similarity.
@@ -456,7 +456,7 @@ iterate_word_similarity(int *trg2indexes,
 lastpos[trgindex] = i;
 }
 
-/* Adjust lower bound if this trigram is present in required substing */
+/* Adjust lower bound if this trigram is present in required substring */
 if (found[trgindex])
 {
 int prev_lower,
@@ -547,7 +547,7 @@ iterate_word_similarity(int *trg2indexes,
 *
 * str1: search pattern string, of length slen1 bytes.
 * str2: text in which we are looking for a word, of length slen2 bytes.
-* check_only: if true then only check existaince of similar search pattern in
+* check_only: if true then only check existence of similar search pattern in
 * text.
 *
 * Returns word similarity.
@@ -311,7 +311,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf)
 }
 
 /*
-* caller wants exatly len bytes and dont bother with references
+* caller wants exactly len bytes and don't bother with references
 */
 int
 pullf_read_fixed(PullFilter *src, int len, uint8 *dst)
@@ -139,7 +139,7 @@ bn_to_mpi(mpz_t *bn)
 }
 
 /*
-* Decide the number of bits in the random componont k
+* Decide the number of bits in the random component k
 *
 * It should be in the same range as p for signing (which
 * is deprecated), but can be much smaller for encrypting.
@@ -147,8 +147,8 @@ bn_to_mpi(mpz_t *bn)
 * Until I research it further, I just mimic gpg behaviour.
 * It has a special mapping table, for values <= 5120,
 * above that it uses 'arbitrary high number'. Following
-* algorihm hovers 10-70 bits above gpg values. And for
-* larger p, it uses gpg's algorihm.
+* algorithm hovers 10-70 bits above gpg values. And for
+* larger p, it uses gpg's algorithm.
 *
 * The point is - if k gets large, encryption will be
 * really slow. It does not matter for decryption.
@@ -74,7 +74,7 @@ bn_to_mpi(BIGNUM *bn)
 }
 
 /*
-* Decide the number of bits in the random componont k
+* Decide the number of bits in the random component k
 *
 * It should be in the same range as p for signing (which
 * is deprecated), but can be much smaller for encrypting.
@@ -82,8 +82,8 @@ bn_to_mpi(BIGNUM *bn)
 * Until I research it further, I just mimic gpg behaviour.
 * It has a special mapping table, for values <= 5120,
 * above that it uses 'arbitrary high number'. Following
-* algorihm hovers 10-70 bits above gpg values. And for
-* larger p, it uses gpg's algorihm.
+* algorithm hovers 10-70 bits above gpg values. And for
+* larger p, it uses gpg's algorithm.
 *
 * The point is - if k gets large, encryption will be
 * really slow. It does not matter for decryption.
@@ -2053,7 +2053,7 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM
 1
 (10 rows)
 
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
 -- push-down in some cases
 -- unable to push {ft1, ft2}
 EXPLAIN (VERBOSE, COSTS OFF)
@@ -485,7 +485,7 @@ EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
 -- push-down in some cases
 -- unable to push {ft1, ft2}
 EXPLAIN (VERBOSE, COSTS OFF)
@@ -888,7 +888,7 @@ restore(char *result, float val, int n)
 if (Abs(exp) <= 4)
 {
 /*
-* remove the decimal point from the mantyssa and write the digits
+* remove the decimal point from the mantissa and write the digits
 * to the buf array
 */
 for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++)
@@ -23,7 +23,7 @@
 * When we ask SELinux whether the required privileges are allowed or not,
 * we use security_compute_av(3). It needs us to represent object classes
 * and access vectors using 'external' codes defined in the security policy.
-* It is determinded in the runtime, not build time. So, it needs an internal
+* It is determined in the runtime, not build time. So, it needs an internal
 * service to translate object class/access vectors which we want to check
 * into the code which kernel want to be given.
 */
@@ -206,7 +206,7 @@ SELECT * FROM auth_tbl; -- failed
 SELECT sepgsql_setcon(NULL); -- end of session
 SELECT sepgsql_getcon();
 
--- the pooler cannot touch these tables directry
+-- the pooler cannot touch these tables directly
 SELECT * FROM foo_tbl; -- failed
 
 SELECT * FROM var_tbl; -- failed
@@ -89,7 +89,7 @@ check_primary_key(PG_FUNCTION_ARGS)
 /* internal error */
 elog(ERROR, "check_primary_key: cannot process DELETE events");
 
-/* If UPDATion the must check new Tuple, not old one */
+/* If UPDATE, then must check new Tuple, not old one */
 else
 tuple = trigdata->tg_newtuple;
 
@@ -29,7 +29,7 @@
 # modified by Ray Aspeitia 12-03-2003 :
 # added log rotation script to db startup
 # modified StartupParameters.plist "Provides" parameter to make it easier to
-# start and stop with the SystemStarter utitlity
+# start and stop with the SystemStarter utility
 
 # use the below command in order to correctly start/stop/restart PG with log rotation script:
 # SystemStarter [start|stop|restart] PostgreSQL
@@ -414,7 +414,7 @@ CREATE FUNCTION stat(text,text)
 LANGUAGE INTERNAL
 RETURNS NULL ON NULL INPUT;
 
---reset - just for debuging
+--reset - just for debugging
 CREATE FUNCTION reset_tsearch()
 RETURNS void
 as 'MODULE_PATHNAME', 'tsa_reset_tsearch'
@@ -610,7 +610,7 @@ xpath_table(PG_FUNCTION_ARGS)
 
 /*
 * At the moment we assume that the returned attributes make sense for the
-* XPath specififed (i.e. we trust the caller). It's not fatal if they get
+* XPath specified (i.e. we trust the caller). It's not fatal if they get
 * it wrong - the input function for the column type will raise an error
 * if the path result can't be converted into the correct binary
 * representation.
@@ -401,7 +401,7 @@ $(shlib): $(OBJS) $(DLL_DEFFILE) | $(SHLIB_PREREQS)
 $(CC) $(CFLAGS) -shared -static-libgcc -o $@ $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib)
 endif
 
-endif # PORTNAME == cgywin
+endif # PORTNAME == cygwin
 endif # PORTNAME == cygwin || PORTNAME == win32
 
 
@@ -28,7 +28,7 @@ The current implementation of GiST supports:
 
 The support for concurrency implemented in PostgreSQL was developed based on
 the paper "Access Methods for Next-Generation Database Systems" by
-Marcel Kornaker:
+Marcel Kornacker:
 
 http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz
 
@@ -209,7 +209,7 @@ typedef struct RewriteMappingFile
 } RewriteMappingFile;
 
 /*
-* A single In-Memeory logical rewrite mapping, hanging of
+* A single In-Memory logical rewrite mapping, hanging off
 * RewriteMappingFile->mappings.
 */
 typedef struct RewriteMappingDataEntry
@@ -614,7 +614,7 @@ CommitTsParameterChange(bool newvalue, bool oldvalue)
 
 /*
 * Activate this module whenever necessary.
-* This must happen during postmaster or standalong-backend startup,
+* This must happen during postmaster or standalone-backend startup,
 * or during WAL replay anytime the track_commit_timestamp setting is
 * changed in the master.
 *
@@ -2746,7 +2746,7 @@ CommitTransactionCommand(void)
 * These shouldn't happen. TBLOCK_DEFAULT means the previous
 * StartTransactionCommand didn't set the STARTED state
 * appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended
-* by EndParallelWorkerTranaction(), not this function.
+* by EndParallelWorkerTransaction(), not this function.
 */
 case TBLOCK_DEFAULT:
 case TBLOCK_PARALLEL_INPROGRESS:
@@ -727,7 +727,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid, List **objname);
 *
 * Note: If the object is not found, we don't give any indication of the
 * reason. (It might have been a missing schema if the name was qualified, or
-* an inexistant type name in case of a cast, function or operator; etc).
+* a nonexistent type name in case of a cast, function or operator; etc).
 * Currently there is only one caller that might be interested in such info, so
 * we don't spend much effort here. If more callers start to care, it might be
 * better to add some support for that in this function.
@@ -34,7 +34,7 @@ static const char *get_am_type_string(char amtype);
 
 
 /*
-* CreateAcessMethod
+* CreateAccessMethod
 * Registers a new access method.
 */
 ObjectAddress
@@ -674,7 +674,7 @@ createdb(const CreatedbStmt *stmt)
 
 /*
 * Force synchronous commit, thus minimizing the window between
-* creation of the database files and commital of the transaction. If
+* creation of the database files and committal of the transaction. If
 * we crash before committing, we'll have a DB that's taking up disk
 * space but is not in pg_database, which is not good.
 */
@@ -928,7 +928,7 @@ dropdb(const char *dbname, bool missing_ok)
 
 /*
 * Force synchronous commit, thus minimizing the window between removal of
-* the database files and commital of the transaction. If we crash before
+* the database files and committal of the transaction. If we crash before
 * committing, we'll have a DB that's gone on disk but still there
 * according to pg_database, which is not good.
 */
@@ -1286,7 +1286,7 @@ movedb(const char *dbname, const char *tblspcname)
 
 /*
 * Force synchronous commit, thus minimizing the window between
-* copying the database files and commital of the transaction. If we
+* copying the database files and committal of the transaction. If we
 * crash before committing, we'll leave an orphaned set of files on
 * disk, which is not fatal but not good either.
 */
@@ -3377,7 +3377,7 @@ ExplainYAMLLineStarting(ExplainState *es)
 }
 
 /*
-* YAML is a superset of JSON; unfortuantely, the YAML quoting rules are
+* YAML is a superset of JSON; unfortunately, the YAML quoting rules are
 * ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of
 * http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything.
 * Empty strings, strings with leading or trailing whitespace, and strings
@@ -1041,7 +1041,7 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
 }
 else
 {
-/* store SQL NULL instead of emtpy array */
+/* store SQL NULL instead of empty array */
 trftypes = NULL;
 }
 
@@ -1446,7 +1446,7 @@ CreateCast(CreateCastStmt *stmt)
 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
 errmsg("cast will be ignored because the target data type is a domain")));
 
-/* Detemine the cast method */
+/* Determine the cast method */
 if (stmt->func != NULL)
 castmethod = COERCION_METHOD_FUNCTION;
 else if (stmt->inout)
@@ -100,7 +100,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
 * Errors arising from the attribute list still apply.
 *
 * Most column type changes that can skip a table rewrite do not invalidate
-* indexes. We ackowledge this when all operator classes, collations and
+* indexes. We acknowledge this when all operator classes, collations and
 * exclusion operators match. Though we could further permit intra-opfamily
 * changes for btree and hash indexes, that adds subtle complexity with no
 * concrete benefit for core types.
@@ -961,7 +961,7 @@ CheckMutability(Expr *expr)
 * indxpath.c could do something with. However, that seems overly
 * restrictive. One useful application of partial indexes is to apply
 * a UNIQUE constraint across a subset of a table, and in that scenario
-* any evaluatable predicate will work. So accept any predicate here
+* any evaluable predicate will work. So accept any predicate here
 * (except ones requiring a plan), and let indxpath.c fend for itself.
 */
 static void
@@ -6194,7 +6194,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 
 /*
 * Check if ONLY was specified with ALTER TABLE. If so, allow the
-* contraint creation only if there are no children currently. Error out
+* constraint creation only if there are no children currently. Error out
 * otherwise.
 */
 if (!recurse && children != NIL)
@@ -502,7 +502,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
 int plan_node_id = planstate->plan->plan_node_id;
 MemoryContext oldcontext;
 
-/* Find the instumentation for this node. */
+/* Find the instrumentation for this node. */
 for (i = 0; i < instrumentation->num_plan_nodes; ++i)
 if (instrumentation->plan_node_id[i] == plan_node_id)
 break;
@@ -354,7 +354,7 @@ advance_windowaggregate(WindowAggState *winstate,
 
 /*
 * We must track the number of rows included in transValue, since to
-* remove the last input, advance_windowaggregate_base() musn't call the
+* remove the last input, advance_windowaggregate_base() mustn't call the
 * inverse transition function, but simply reset transValue back to its
 * initial value.
 */
@@ -87,7 +87,7 @@ static MemoryContext parsed_hba_context = NULL;
 *
 * NOTE: the IdentLine structs can contain pre-compiled regular expressions
 * that live outside the memory context. Before destroying or resetting the
-* memory context, they need to be expliticly free'd.
+* memory context, they need to be explicitly free'd.
 */
 static List *parsed_ident_lines = NIL;
 static MemoryContext parsed_ident_context = NULL;
@@ -111,7 +111,7 @@ gimme_edge_table(PlannerInfo *root, Gene *tour1, Gene *tour2,
 for (index1 = 0; index1 < num_gene; index1++)
 {
 /*
-* presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+* presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation
 * maps n back to 1
 */
 
@@ -314,7 +314,7 @@ gimme_gene(PlannerInfo *root, Edge edge, Edge *edge_table)
 /*
 * give priority to candidates with fewest remaining unused edges;
 * find out what the minimum number of unused edges is
-* (minimum_edges); if there is more than one cadidate with the
+* (minimum_edges); if there is more than one candidate with the
 * minimum number of unused edges keep count of this number
 * (minimum_count);
 */
@@ -1579,7 +1579,7 @@ select_mergejoin_clauses(PlannerInfo *root,
 /*
 * Insist that each side have a non-redundant eclass. This
 * restriction is needed because various bits of the planner expect
-* that each clause in a merge be associatable with some pathkey in a
+* that each clause in a merge be associable with some pathkey in a
 * canonical pathkey list, but redundant eclasses can't appear in
 * canonical sort orderings. (XXX it might be worth relaxing this,
 * but not enough time to address it for 8.3.)
@@ -196,7 +196,7 @@ query_planner(PlannerInfo *root, List *tlist,
 /*
 * Now distribute "placeholders" to base rels as needed. This has to be
 * done after join removal because removal could change whether a
-* placeholder is evaluatable at a base rel.
+* placeholder is evaluable at a base rel.
 */
 add_placeholders_to_base_rels(root);
 
@@ -24,7 +24,7 @@
 * Detect whether there is a joinclause that involves
 * the two given relations.
 *
-* Note: the joinclause does not have to be evaluatable with only these two
+* Note: the joinclause does not have to be evaluable with only these two
 * relations. This is intentional. For example consider
 * SELECT * FROM a, b, c WHERE a.x = (b.y + c.z)
 * If a is much larger than the other tables, it may be worthwhile to
@@ -550,7 +550,7 @@ join_clause_is_movable_into(RestrictInfo *rinfo,
 Relids currentrelids,
 Relids current_and_outer)
 {
-/* Clause must be evaluatable given available context */
+/* Clause must be evaluable given available context */
 if (!bms_is_subset(rinfo->clause_relids, current_and_outer))
 return false;
 
@@ -10682,7 +10682,7 @@ table_ref: relation_expr opt_alias_clause
 n->lateral = true;
 n->subquery = $2;
 n->alias = $3;
-/* same coment as above */
+/* same comment as above */
 if ($3 == NULL)
 {
 if (IsA($2, SelectStmt) &&
@@ -209,7 +209,7 @@ BackgroundWriterMain(void)
 /* Flush any leaked data in the top-level context */
 MemoryContextResetAndDeleteChildren(bgwriter_context);
 
-/* re-initilialize to avoid repeated errors causing problems */
+/* re-initialize to avoid repeated errors causing problems */
 WritebackContextInit(&wb_context, &bgwriter_flush_after);
 
 /* Now we can allow interrupts again */
@@ -5130,7 +5130,7 @@ PostmasterRandom(void)
 }
 
 /*
-* Count up number of child processes of specified types (dead_end chidren
+* Count up number of child processes of specified types (dead_end children
 * are always excluded).
 */
 static int
@@ -1257,7 +1257,7 @@ pg_replication_origin_session_is_setup(PG_FUNCTION_ARGS)
 * Return the replication progress for origin setup in the current session.
 *
 * If 'flush' is set to true it is ensured that the returned value corresponds
-* to a local transaction that has been flushed. this is useful if asychronous
+* to a local transaction that has been flushed. this is useful if asynchronous
 * commits are used when replaying replicated transactions.
 */
 Datum
@@ -1343,7 +1343,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
 * Return the replication progress for an individual replication origin.
 *
 * If 'flush' is set to true it is ensured that the returned value corresponds
-* to a local transaction that has been flushed. this is useful if asychronous
+* to a local transaction that has been flushed. this is useful if asynchronous
 * commits are used when replaying replicated transactions.
 */
 Datum
@@ -1716,7 +1716,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid,
 *
 * NB: Transactions handled here have to have actively aborted (i.e. have
 * produced an abort record). Implicitly aborted transactions are handled via
-* ReorderBufferAbortOld(); transactions we're just not interesteded in, but
+* ReorderBufferAbortOld(); transactions we're just not interested in, but
 * which have committed are handled in ReorderBufferForget().
 *
 * This function purges this transaction and its contents from memory and
@@ -1784,7 +1784,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid)
 * toplevel xid.
 *
 * This is significantly different to ReorderBufferAbort() because
-* transactions that have committed need to be treated differenly from aborted
+* transactions that have committed need to be treated differently from aborted
 * ones since they may have modified the catalog.
 *
 * Note that this is only allowed to be called in the moment a transaction
@@ -2662,7 +2662,7 @@ StartupReorderBuffer(void)
 
 /*
 * ok, has to be a surviving logical slot, iterate and delete
-* everythign starting with xid-*
+* everything starting with xid-*
 */
 sprintf(path, "pg_replslot/%s", logical_de->d_name);
 
@@ -614,7 +614,7 @@ SnapBuildGetOrBuildSnapshot(SnapBuild *builder, TransactionId xid)
 if (builder->snapshot == NULL)
 {
 builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-/* inrease refcount for the snapshot builder */
+/* increase refcount for the snapshot builder */
 SnapBuildSnapIncRefcount(builder->snapshot);
 }
 
@@ -678,7 +678,7 @@ SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn)
 if (builder->snapshot == NULL)
 {
 builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-/* inrease refcount for the snapshot builder */
+/* increase refcount for the snapshot builder */
 SnapBuildSnapIncRefcount(builder->snapshot);
 }
 
@@ -911,7 +911,7 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid)
 {
 /*
 * None of the originally running transaction is running anymore,
-* so our incrementaly built snapshot now is consistent.
+* so our incrementally built snapshot now is consistent.
 */
 ereport(LOG,
 (errmsg("logical decoding found consistent point at %X/%X",
@@ -857,7 +857,7 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
 * reached. At most nevents occurred events are returned.
 *
 * If timeout = -1, block until an event occurs; if 0, check sockets for
-* readiness, but don't block; if > 0, block for at most timeout miliseconds.
+* readiness, but don't block; if > 0, block for at most timeout milliseconds.
 *
 * Returns the number of events occurred, or 0 if the timeout was reached.
 *
@@ -500,7 +500,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
 * it will point to a temporary buffer. This mostly avoids data copying in
 * the hoped-for case where messages are short compared to the buffer size,
 * while still allowing longer messages. In either case, the return value
-* remains valid until the next receive operation is perfomed on the queue.
+* remains valid until the next receive operation is performed on the queue.
 *
 * When nowait = false, we'll wait on our process latch when the ring buffer
 * is empty and we have not yet received a full message. The sender will
@@ -2781,7 +2781,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
 vxids = (VirtualTransactionId *)
 palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
 
-/* Compute hash code and partiton lock, and look up conflicting modes. */
+/* Compute hash code and partition lock, and look up conflicting modes. */
 hashcode = LockTagHashCode(locktag);
 partitionLock = LockHashPartitionLock(hashcode);
 conflictMask = lockMethodTable->conflictTab[lockmode];
@@ -834,7 +834,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode)
 return false;
 }
 else
-return true; /* someobdy else has the lock */
+return true; /* somebody else has the lock */
 }
 }
 pg_unreachable();
@@ -1006,7 +1006,7 @@ LWLockWakeup(LWLock *lock)
 * that happens before the list unlink happens, the list would end up
 * being corrupted.
 *
-* The barrier pairs with the LWLockWaitListLock() when enqueing for
+* The barrier pairs with the LWLockWaitListLock() when enqueuing for
 * another lock.
 */
 pg_write_barrier();
@@ -1082,7 +1082,7 @@ LWLockDequeueSelf(LWLock *lock)
 
 /*
 * Can't just remove ourselves from the list, but we need to iterate over
-* all entries as somebody else could have unqueued us.
+* all entries as somebody else could have dequeued us.
 */
 dlist_foreach_modify(iter, &lock->waiters)
 {
@@ -3204,7 +3204,7 @@ ReleasePredicateLocks(bool isCommit)
 /*
 * We can't trust XactReadOnly here, because a transaction which started
 * as READ WRITE can show as READ ONLY later, e.g., within
-* substransactions. We want to flag a transaction as READ ONLY if it
+* subtransactions. We want to flag a transaction as READ ONLY if it
 * commits without writing so that de facto READ ONLY transactions get the
 * benefit of some RO optimizations, so we will use this local variable to
 * get some cleanup logic right which is based on whether the transaction
@@ -37,7 +37,7 @@
 * Spell field. The AffixData field is initialized if AF parameter is not
 * defined.
 * - NISortAffixes():
-* - builds a list of compond affixes from the affix list and stores it
+* - builds a list of compound affixes from the affix list and stores it
 * in the CompoundAffix.
 * - builds prefix trees (Trie) from the affix list for prefixes and suffixes
 * and stores them in Suffix and Prefix fields.
@@ -179,7 +179,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 if (ld->curDictId == InvalidOid)
 {
 /*
-* usial mode: dictionary wants only one word, but we should keep in
+* usual mode: dictionary wants only one word, but we should keep in
 * mind that we should go through all stack
 */
 
@@ -272,7 +272,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
 
 /*
 * We should be sure that current type of lexeme is recognized
-* by our dictinonary: we just check is it exist in list of
+* by our dictionary: we just check is it exist in list of
 * dictionaries ?
 */
 for (i = 0; i < map->len && !dictExists; i++)
@@ -627,7 +627,7 @@ generateHeadline(HeadlineParsedText *prs)
 /* start of a new fragment */
 infrag = 1;
 numfragments++;
-/* add a fragment delimitor if this is after the first one */
+/* add a fragment delimiter if this is after the first one */
 if (numfragments > 1)
 {
 memcpy(ptr, prs->fragdelim, prs->fragdelimlen);
@@ -2445,7 +2445,7 @@ mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight,
 break;
 }
 if (curlen < min_words && i >= prs->curwords)
-{ /* got end of text and our cover is shoter
+{ /* got end of text and our cover is shorter
 * than min_words */
 for (i = p - 1; i >= 0; i--)
 {
@ -2278,7 +2278,7 @@ seq_search(char *name, const char *const * array, int type, int max, int *len)
|
|||||||
|
|
||||||
for (last = 0, a = array; *a != NULL; a++)
|
for (last = 0, a = array; *a != NULL; a++)
|
||||||
{
|
{
|
||||||
/* comperate first chars */
|
/* compare first chars */
|
||||||
if (*name != **a)
|
if (*name != **a)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
@@ -533,7 +533,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
 {
 /*
 * Lower bound no longer matters. Just estimate the fraction
-* with an upper bound <= const uppert bound
+* with an upper bound <= const upper bound
 */
 hist_selec =
 calc_hist_selectivity_scalar(typcache, &const_upper,
@@ -2504,7 +2504,7 @@ is_input_argument(int nth, const char *argmodes)
 }
 
 /*
-* Append used transformated types to specified buffer
+* Append used transformed types to specified buffer
 */
 static void
 print_function_trftypes(StringInfo buf, HeapTuple proctup)
@@ -898,7 +898,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
 
 /*
 * if doc are big enough then ext.q may be equal to ext.p due to limit
-* of posional information. In this case we approximate number of
+* of positional information. In this case we approximate number of
 * noise word as half cover's length
 */
 nNoise = (ext.q - ext.p) - (ext.end - ext.begin);
@@ -907,7 +907,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method)
 Wdoc += Cpos / ((double) (1 + nNoise));
 
 CurExtPos = ((double) (ext.q + ext.p)) / 2.0;
-if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent devision by
+if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent division by
 * zero in a case of
 multiple lexize */ )
 SumDist += 1.0 / (CurExtPos - PrevExtPos);
@@ -342,7 +342,7 @@ window_lag(PG_FUNCTION_ARGS)
 
 /*
 * lag_with_offset
-* returns the value of VE evelulated on a row that is OFFSET
+* returns the value of VE evaluated on a row that is OFFSET
 * rows before the current row within a partition,
 * per spec.
 */
2 src/backend/utils/cache/relcache.c vendored
@@ -1125,7 +1125,7 @@ RelationInitPhysicalAddr(Relation relation)
 * points to the current file since the older file will be gone (or
 * truncated). The new file will still contain older rows so lookups
 * in them will work correctly. This wouldn't work correctly if
-* rewrites were allowed to change the schema in a noncompatible way,
+* rewrites were allowed to change the schema in an incompatible way,
 * but those are prevented both on catalog tables and on user tables
 * declared as additional catalog tables.
 */
@@ -878,7 +878,7 @@ get_func_arg_info(HeapTuple procTup,
 /*
 * get_func_trftypes
 *
-* Returns a number of transformated types used by function.
+* Returns the number of transformed types used by function.
 */
 int
 get_func_trftypes(HeapTuple procTup,
@@ -1108,7 +1108,7 @@ process_settings(Oid databaseid, Oid roleid)
 
 relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock);
 
-/* read all the settings under the same snapsot for efficiency */
+/* read all the settings under the same snapshot for efficiency */
 snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId));
 
 /* Later settings are ignored if set earlier. */
@@ -18,7 +18,7 @@ OBJS = guc.o help_config.o pg_config.o pg_controldata.o pg_rusage.o \
 ps_status.o rls.o sampling.o superuser.o timeout.o tzparser.o
 
 # This location might depend on the installation directories. Therefore
-# we can't subsitute it into pg_config.h.
+# we can't substitute it into pg_config.h.
 ifdef krb_srvtab
 override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"'
 endif
@@ -1625,7 +1625,7 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
 }
 
 /*
-* check whether the transaciont id 'xid' is in the pre-sorted array 'xip'.
+* check whether the transaction id 'xid' is in the pre-sorted array 'xip'.
 */
 static bool
 TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
@@ -203,7 +203,7 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
 *
 * Optional.
 *
-* Set up extrac format-related TOC data.
+* Set up extract format-related TOC data.
 */
 static void
 _ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
@@ -776,7 +776,7 @@ StoreQueryTuple(const PGresult *result)
 char *varname;
 char *value;
 
-/* concate prefix and column name */
+/* concatenate prefix and column name */
 varname = psprintf("%s%s", pset.gset_prefix, colname);
 
 if (!PQgetisnull(result, 0, i))
@@ -2060,7 +2060,7 @@ describeOneTableDetails(const char *schemaname,
 printTableAddFooter(&cont, _("Check constraints:"));
 for (i = 0; i < tuples; i++)
 {
-/* untranslated contraint name and def */
+/* untranslated constraint name and def */
 printfPQExpBuffer(&buf, " \"%s\" %s",
 PQgetvalue(result, i, 0),
 PQgetvalue(result, i, 1));
@@ -3052,7 +3052,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
 if (verbose)
 {
 /*
-* As of PostgreSQL 9.0, use pg_table_size() to show a more acurate
+* As of PostgreSQL 9.0, use pg_table_size() to show a more accurate
 * size of a table, including FSM, VM and TOAST tables.
 */
 if (pset.sversion >= 90000)
@@ -26,7 +26,7 @@
 #define VISIBILITYMAP_ALL_VISIBLE 0x01
 #define VISIBILITYMAP_ALL_FROZEN 0x02
 #define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid
-* visiblitymap flags bits */
+* visibilitymap flags bits */
 
 /* Macros for visibilitymap test */
 #define VM_ALL_VISIBLE(r, b, v) \
@@ -65,7 +65,7 @@ typedef enum
 * apply */
 } SyncCommitLevel;
 
-/* Define the default setting for synchonous_commit */
+/* Define the default setting for synchronous_commit */
 #define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH
 
 /* Synchronous commit level */
@@ -970,7 +970,7 @@ typedef NameData *Name;
 /* gettext domain name mangling */
 
 /*
-* To better support parallel installations of major PostgeSQL
+* To better support parallel installations of major PostgreSQL
 * versions as well as parallel installations of major library soname
 * versions, we mangle the gettext domain name by appending those
 * version numbers. The coding rule ought to be that wherever the
@@ -865,7 +865,7 @@ typedef LONG slock_t;
 #define SPIN_DELAY() spin_delay()
 
 /* If using Visual C++ on Win64, inline assembly is unavailable.
-* Use a _mm_pause instrinsic instead of rep nop.
+* Use a _mm_pause intrinsic instead of rep nop.
 */
 #if defined(_WIN64)
 static __forceinline void
@@ -147,7 +147,7 @@ typedef struct
 } CMPDAffix;
 
 /*
-* Type of encoding affix flags in Hunspel dictionaries
+* Type of encoding affix flags in Hunspell dictionaries
 */
 typedef enum
 {
@@ -2,7 +2,7 @@
 
 /*
 * The aim is to get a simpler interface to the database routines.
-* All the tidieous messing around with tuples is supposed to be hidden
+* All the tedious messing around with tuples is supposed to be hidden
 * by this function.
 */
 /* Author: Linus Tolke
@@ -324,7 +324,7 @@ PGTYPESdate_fmt_asc(date dDate, const char *fmtstring, char *outbuf)
 *
 * function works as follows:
 * - first we analyze the parameters
-* - if this is a special case with no delimiters, add delimters
+* - if this is a special case with no delimiters, add delimiters
 * - find the tokens. First we look for numerical values. If we have found
 * less than 3 tokens, we check for the months' names and thereafter for
 * the abbreviations of the months' names.
@@ -1368,11 +1368,11 @@ PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
 {
 /* use cmp_abs function to calculate the result */
 
-/* both are positive: normal comparation with cmp_abs */
+/* both are positive: normal comparison with cmp_abs */
 if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS)
 return cmp_abs(var1, var2);
 
-/* both are negative: return the inverse of the normal comparation */
+/* both are negative: return the inverse of the normal comparison */
 if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
 {
 /*
@@ -207,7 +207,7 @@ create_questionmarks(char *name, bool array)
 
 /* In case we have a struct, we have to print as many "?" as there are attributes in the struct
 * An array is only allowed together with an element argument
-* This is essantially only used for inserts, but using a struct as input parameter is an error anywhere else
+* This is essentially only used for inserts, but using a struct as input parameter is an error anywhere else
 * so we don't have to worry here. */
 
 if (p->type->type == ECPGt_struct || (array && p->type->type == ECPGt_array && p->type->u.element->type == ECPGt_struct))
@@ -355,7 +355,7 @@ ECPGExecuteImmediateStmt: EXECUTE IMMEDIATE execstring
 $$ = $3;
 };
 /*
-* variable decalartion outside exec sql declare block
+* variable declaration outside exec sql declare block
 */
 ECPGVarDeclaration: single_vt_declaration;
 
@@ -707,7 +707,7 @@ struct_union_type_with_symbol: s_struct_union_symbol
 free(forward_name);
 forward_name = NULL;
 
-/* This is essantially a typedef but needs the keyword struct/union as well.
+/* This is essentially a typedef but needs the keyword struct/union as well.
 * So we create the typedef for each struct definition with symbol */
 for (ptr = types; ptr != NULL; ptr = ptr->next)
 {
@@ -1275,7 +1275,7 @@ descriptor_item: SQL_CARDINALITY { $$ = ECPGd_cardinality; }
 ;
 
 /*
-* set/reset the automatic transaction mode, this needs a differnet handling
+* set/reset the automatic transaction mode, this needs a different handling
 * as the other set commands
 */
 ECPGSetAutocommit: SET SQL_AUTOCOMMIT '=' on_off { $$ = $4; }
@@ -1287,7 +1287,7 @@ on_off: ON { $$ = mm_strdup("on"); }
 ;
 
 /*
-* set the actual connection, this needs a differnet handling as the other
+* set the actual connection, this needs a different handling as the other
 * set commands
 */
 ECPGSetConnection: SET CONNECTION TO connection_object { $$ = $4; }
@@ -550,7 +550,7 @@ sub dump_fields
 if ($len == 1)
 {
 
-# Straight assignement
+# Straight assignment
 $str = ' $$ = ' . $flds_new[0] . ';';
 add_to_buffer('rules', $str);
 }
@@ -794,7 +794,7 @@ pg_fe_getauthname(PQExpBuffer errorMessage)
 * be sent in cleartext if it is encrypted on the client side. This is
 * good because it ensures the cleartext password won't end up in logs,
 * pg_stat displays, etc. We export the function so that clients won't
-* be dependent on low-level details like whether the enceyption is MD5
+* be dependent on low-level details like whether the encryption is MD5
 * or something else.
 *
 * Arguments are the cleartext password, and the SQL name of the user it
@@ -634,7 +634,7 @@ extern void pq_reset_sigpipe(sigset_t *osigset, bool sigpipe_pending,
 #endif
 
 /*
-* The SSL implementatation provides these functions (fe-secure-openssl.c)
+* The SSL implementation provides these functions (fe-secure-openssl.c)
 */
 extern void pgtls_init_library(bool do_ssl, int do_crypto);
 extern int pgtls_init(PGconn *conn);
@@ -32,7 +32,7 @@
 
 #include "win32.h"
 
-/* Declared here to avoid pulling in all includes, which causes name collissions */
+/* Declared here to avoid pulling in all includes, which causes name collisions */
 #ifdef ENABLE_NLS
 extern char *libpq_gettext(const char *msgid) pg_attribute_format_arg(1);
 #else
@@ -79,7 +79,7 @@ to be installed on your system.
 If this option is given, a copy of each file will be saved with
 the given suffix that contains the suggested changes. This does
 not require any external programs. Note that this does not
-automagially add a dot between the original filename and the
+automagically add a dot between the original filename and the
 suffix. If you want the dot, you have to include it in the option
 argument.
 
@@ -4364,9 +4364,9 @@ DPPP_(my_vload_module)(U32 flags, SV *name, SV *ver, va_list *args)
 
 OP * const modname = newSVOP(OP_CONST, 0, name);
 /* 5.005 has a somewhat hacky force_normal that doesn't croak on
-SvREADONLY() if PL_compling is true. Current perls take care in
+SvREADONLY() if PL_compiling is true. Current perls take care in
 ck_require() to correctly turn off SvREADONLY before calling
-force_normal_flags(). This seems a better fix than fudging PL_compling
+force_normal_flags(). This seems a better fix than fudging PL_compiling
 */
 SvREADONLY_off(((SVOP*)modname)->op_sv);
 modname->op_private |= OPpCONST_BARE;
@@ -303,7 +303,7 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
 long plain_lineno;
 
 /*
-* The second frame points at the internal function, but to mimick
+* The second frame points at the internal function, but to mimic
 * Python error reporting we want to say <module>.
 */
 if (*tb_depth == 1)
@@ -463,7 +463,7 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw)
 
 if (strcmp(keyword, "message") == 0)
 {
-/* the message should not be overwriten */
+/* the message should not be overwritten */
 if (PyTuple_Size(args) != 0)
 {
 PLy_exception_set(PyExc_TypeError, "Argument 'message' given by name and position");
@@ -71,7 +71,7 @@ typedef union PLyTypeOutput
 PLyObToTuple r;
 } PLyTypeOutput;
 
-/* all we need to move Postgresql data to Python objects,
+/* all we need to move PostgreSQL data to Python objects,
 * and vice versa
 */
 typedef struct PLyTypeInfo
@@ -7,7 +7,7 @@
 # be changed and a report of the closed day's receipts subsequently
 # run which will miss a receipt from the date which has been closed.
 #
-# There are only six permuations which must cause a serialization failure.
+# There are only six permutations which must cause a serialization failure.
 # Failure cases are where s1 overlaps both s2 and s3, but s2 commits before
 # s3 executes its first SELECT.
 #
@@ -2,7 +2,7 @@
 #
 # Small, simple test showing read-only anomalies.
 #
-# There are only four permuations which must cause a serialization failure.
+# There are only four permutations which must cause a serialization failure.
 # Required failure cases are where s2 overlaps both s1 and s3, but s1
 # commits before s3 executes its first SELECT.
 #
@@ -310,7 +310,7 @@ INSERT INTO tmp3 values (5,50);
 -- Try (and fail) to add constraint due to invalid source columns
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
 ERROR: column "c" referenced in foreign key constraint does not exist
--- Try (and fail) to add constraint due to invalide destination columns explicitly given
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
 ERROR: column "b" referenced in foreign key constraint does not exist
 -- Try (and fail) to add constraint due to invalid data
@@ -2842,7 +2842,7 @@ ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
 ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
 ERROR: could not change table "unlogged2" to logged because it references unlogged table "unlogged1"
 ALTER TABLE unlogged1 SET LOGGED;
--- check relpersistence of an unlogged table after changing to permament
+-- check relpersistence of an unlogged table after changing to permanent
 SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
 UNION ALL
 SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
@@ -23,7 +23,7 @@ UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
 ("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
 (4 rows)
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
 substring
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -61,7 +61,7 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
 (4 rows)
 
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 substring
@@ -95,7 +95,7 @@ UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
 ("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
 (4 rows)
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
 substring
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -135,7 +135,7 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
 (5 rows)
 
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 substring
@@ -1,4 +1,4 @@
--- Test iniital privileges
+-- Test initial privileges
 -- There should always be some initial privileges, set up by initdb
 SELECT count(*) > 0 FROM pg_init_privs;
 ?column?
@@ -291,7 +291,7 @@ insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), ke
 ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
 drop index comp_key_index;
 --
--- Partial index tests, no inference predicate specificied
+-- Partial index tests, no inference predicate specified
 --
 create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
 create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
Some files were not shown because too many files have changed in this diff.