postgres/src/backend/tcop/postgres.c

/*-------------------------------------------------------------------------
*
* postgres.c
* POSTGRES C Backend Interface
*
* Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/tcop/postgres.c
*
* NOTES
* this is the "main" module of the postgres backend and
* hence the main module of the "traffic cop".
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#include <sys/socket.h>
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/resource.h>
#endif
#ifndef HAVE_GETRUSAGE
#include "rusagestub.h"
#endif
#include "access/parallel.h"
#include "access/printtup.h"
#include "access/xact.h"
#include "catalog/pg_type.h"
#include "commands/async.h"
#include "commands/prepare.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "nodes/print.h"
#include "optimizer/planner.h"
#include "pgstat.h"
#include "pg_trace.h"
#include "parser/analyze.h"
#include "parser/parser.h"
#include "pg_getopt.h"
#include "postmaster/autovacuum.h"
#include "postmaster/postmaster.h"
#include "replication/slot.h"
#include "replication/walsender.h"
#include "rewrite/rewriteHandler.h"
#include "storage/bufmgr.h"
#include "storage/ipc.h"
#include "storage/proc.h"
#include "storage/procsignal.h"
#include "storage/sinval.h"
#include "tcop/fastpath.h"
#include "tcop/pquery.h"
#include "tcop/tcopprot.h"
#include "tcop/utility.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/snapmgr.h"
#include "utils/timeout.h"
#include "utils/timestamp.h"
#include "mb/pg_wchar.h"
/* ----------------
* global variables
* ----------------
*/
const char *debug_query_string; /* client-supplied query string */
/* Note: whereToSendOutput is initialized for the bootstrap/standalone case */
CommandDest whereToSendOutput = DestDebug;
/* flag for logging end of session */
bool Log_disconnections = false;
int log_statement = LOGSTMT_NONE;
/* GUC variable for maximum stack depth (measured in kilobytes) */
int max_stack_depth = 100;
/* wait N seconds to allow attach from a debugger */
int PostAuthDelay = 0;
/* ----------------
* private variables
* ----------------
*/
/* max_stack_depth converted to bytes for speed of checking */
static long max_stack_depth_bytes = 100 * 1024L;
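/*
* Illustrative note (not in the original source): max_stack_depth is measured
* in kilobytes, so a setting of 2048 yields a precomputed byte limit of
* 2048 * 1024 = 2097152 here, avoiding the multiply on every depth check.
*/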
/*
* Stack base pointer -- initialized by PostmasterMain and inherited by
* subprocesses. This is not static because old versions of PL/Java modify
* it directly. Newer versions use set_stack_base(), but we want to stay
* binary-compatible for the time being.
*/
char *stack_base_ptr = NULL;
/*
* On IA64 we also have to remember the register stack base.
*/
#if defined(__ia64__) || defined(__ia64)
char *register_stack_base_ptr = NULL;
#endif
/*
* Flag to mark SIGHUP. Whenever the main loop comes around it
* will reread the configuration file. (Better than doing the
* reading in the signal handler, eh?)
*/
static volatile sig_atomic_t got_SIGHUP = false;
/*
* Flag to keep track of whether we have started a transaction.
* For extended query protocol this has to be remembered across messages.
*/
static bool xact_started = false;
/*
* Flag to indicate that we are doing the outer loop's read-from-client,
* as opposed to any random read from client that might happen within
* commands like COPY FROM STDIN.
*/
static bool DoingCommandRead = false;
/*
* Flags to implement skip-till-Sync-after-error behavior for messages of
* the extended query protocol.
*/
static bool doing_extended_query_message = false;
static bool ignore_till_sync = false;
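/*
* Illustrative example (not in the original source): if, say, a Bind ('B')
* message fails, ignore_till_sync is set and subsequent extended-protocol
* messages (Describe, Execute, ...) are discarded until a Sync ('S') arrives,
* at which point ReadyForQuery is sent and normal processing resumes.
*/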
/*
* If an unnamed prepared statement exists, it's stored here.
* We keep it separate from the hashtable kept by commands/prepare.c
* in order to reduce overhead for short-lived queries.
*/
static CachedPlanSource *unnamed_stmt_psrc = NULL;
/* assorted command-line switches */
static const char *userDoption = NULL; /* -D switch */
static bool EchoQuery = false; /* -E switch */
static bool UseSemiNewlineNewline = false; /* -j switch */
/* whether or not, and why, we were canceled by conflict with recovery */
static bool RecoveryConflictPending = false;
static bool RecoveryConflictRetryable = true;
static ProcSignalReason RecoveryConflictReason;
/* ----------------------------------------------------------------
* decls for routines only used in this file
* ----------------------------------------------------------------
*/
static int InteractiveBackend(StringInfo inBuf);
static int interactive_getc(void);
static int SocketBackend(StringInfo inBuf);
static int ReadCommand(StringInfo inBuf);
static void forbidden_in_wal_sender(char firstchar);
static List *pg_rewrite_query(Query *query);
static bool check_log_statement(List *stmt_list);
static int errdetail_execute(List *raw_parsetree_list);
static int errdetail_params(ParamListInfo params);
static int errdetail_abort(void);
static int errdetail_recovery_conflict(void);
static void start_xact_command(void);
static void finish_xact_command(void);
static bool IsTransactionExitStmt(Node *parsetree);
static bool IsTransactionExitStmtList(List *parseTrees);
static bool IsTransactionStmtList(List *parseTrees);
static void drop_unnamed_stmt(void);
static void SigHupHandler(SIGNAL_ARGS);
static void log_disconnections(int code, Datum arg);
/* ----------------------------------------------------------------
* routines to obtain user input
* ----------------------------------------------------------------
*/
/* ----------------
* InteractiveBackend() is called for user interactive connections
*
* the string entered by the user is placed in its parameter inBuf,
* and we act like a Q message was received.
*
* EOF is returned if end-of-file input is seen; time to shut down.
* ----------------
*/
static int
InteractiveBackend(StringInfo inBuf)
{
int c; /* character read from getc() */
/*
* display a prompt and obtain input from the user
*/
printf("backend> ");
fflush(stdout);
resetStringInfo(inBuf);
/*
* Read characters until EOF or the appropriate delimiter is seen.
*/
while ((c = interactive_getc()) != EOF)
{
if (c == '\n')
{
if (UseSemiNewlineNewline)
{
/*
* In -j mode, semicolon followed by two newlines ends the
* command; otherwise treat newline as regular character.
*/
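/*
* Illustrative example (not in the original source): with -j, input such as
*
*   CREATE VIEW v AS SELECT 1;<newline>
*   <newline>
*
* ends the command at the blank line (";" followed by two newlines), while
* a newline inside a statement is kept as an ordinary character.
*/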
if (inBuf->len > 1 &&
inBuf->data[inBuf->len - 1] == '\n' &&
inBuf->data[inBuf->len - 2] == ';')
{
/* might as well drop the second newline */
break;
}
}
else
{
/*
* In plain mode, newline ends the command unless preceded by
* backslash.
*/
if (inBuf->len > 0 &&
inBuf->data[inBuf->len - 1] == '\\')
{
/* discard backslash from inBuf */
inBuf->data[--inBuf->len] = '\0';
/* discard newline too */
continue;
}
else
{
/* keep the newline character, but end the command */
appendStringInfoChar(inBuf, '\n');
break;
}
}
}
/* Not newline, or newline treated as regular character */
appendStringInfoChar(inBuf, (char) c);
}
/* No input before EOF signal means time to quit. */
if (c == EOF && inBuf->len == 0)
return EOF;
/*
* otherwise we have a user query so process it.
*/
/* Add '\0' to make it look the same as message case. */
appendStringInfoChar(inBuf, (char) '\0');
/*
* if the query echo flag was given, print the query.
*/
if (EchoQuery)
printf("statement: %s\n", inBuf->data);
fflush(stdout);
return 'Q';
}
/*
* interactive_getc -- collect one character from stdin
*
* Even though we are not reading from a "client" process, we still want to
* respond to signals, particularly SIGTERM/SIGQUIT.
*/
static int
interactive_getc(void)
{
int c;
/*
* This will not process catchup interrupts or notifications while
* reading. But those can't really be relevant for a standalone backend
* anyway. To properly handle SIGTERM there's a hack in die() that
* directly processes interrupts at this stage...
*/
CHECK_FOR_INTERRUPTS();
c = getc(stdin);
ProcessClientReadInterrupt(true);
return c;
}
/* ----------------
* SocketBackend() is called for frontend-backend connections
*
* Returns the message type code, and loads message body data into inBuf.
*
* EOF is returned if the connection is lost.
* ----------------
*/
static int
SocketBackend(StringInfo inBuf)
{
int qtype;
/*
* Get message type code from the frontend.
*/
HOLD_CANCEL_INTERRUPTS();
pq_startmsgread();
qtype = pq_getbyte();
if (qtype == EOF) /* frontend disconnected */
{
if (IsTransactionState())
ereport(COMMERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on client connection with an open transaction")));
else
{
/*
* Can't send DEBUG log messages to client at this point. Since
* we're disconnecting right away, we don't need to restore
* whereToSendOutput.
*/
whereToSendOutput = DestNone;
ereport(DEBUG1,
(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
errmsg("unexpected EOF on client connection")));
}
return qtype;
}
/*
* Validate message type code before trying to read body; if we have lost
* sync, better to say "command unknown" than to run out of memory because
* we used garbage as a length word.
*
* This also gives us a place to set the doing_extended_query_message flag
* as soon as possible.
*/
switch (qtype)
{
case 'Q': /* simple query */
doing_extended_query_message = false;
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
{
/* old style without length word; convert */
if (pq_getstring(inBuf))
{
if (IsTransactionState())
ereport(COMMERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on client connection with an open transaction")));
else
{
/*
* Can't send DEBUG log messages to client at this
* point. Since we're disconnecting right away, we
* don't need to restore whereToSendOutput.
*/
whereToSendOutput = DestNone;
ereport(DEBUG1,
(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
errmsg("unexpected EOF on client connection")));
}
return EOF;
}
}
break;
case 'F': /* fastpath function call */
doing_extended_query_message = false;
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
{
if (GetOldFunctionMessage(inBuf))
{
if (IsTransactionState())
ereport(COMMERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on client connection with an open transaction")));
else
{
/*
* Can't send DEBUG log messages to client at this
* point. Since we're disconnecting right away, we
* don't need to restore whereToSendOutput.
*/
whereToSendOutput = DestNone;
ereport(DEBUG1,
(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
errmsg("unexpected EOF on client connection")));
}
return EOF;
}
}
break;
case 'X': /* terminate */
doing_extended_query_message = false;
ignore_till_sync = false;
break;
case 'B': /* bind */
case 'C': /* close */
case 'D': /* describe */
case 'E': /* execute */
case 'H': /* flush */
case 'P': /* parse */
doing_extended_query_message = true;
/* these are only legal in protocol 3 */
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid frontend message type %d", qtype)));
break;
case 'S': /* sync */
/* stop any active skip-till-Sync */
ignore_till_sync = false;
/* mark not-extended, so that a new error doesn't begin skip */
doing_extended_query_message = false;
/* only legal in protocol 3 */
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid frontend message type %d", qtype)));
break;
case 'd': /* copy data */
case 'c': /* copy done */
case 'f': /* copy fail */
doing_extended_query_message = false;
/* these are only legal in protocol 3 */
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid frontend message type %d", qtype)));
break;
default:
/*
* Otherwise we got garbage from the frontend. We treat this as
* fatal because we have probably lost message boundary sync, and
* there's no good way to recover.
*/
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid frontend message type %d", qtype)));
break;
}
/*
* In protocol version 3, all frontend messages have a length word next
* after the type code; we can read the message contents independently of
* the type.
*/
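/*
* Illustrative framing example (not in the original source): a protocol-3
* simple query arrives as the type byte 'Q', a 4-byte length counting itself
* plus the body, and a NUL-terminated query string, e.g.
*
*   'Q' 0x00 0x00 0x00 0x0D  S E L E C T ' ' 1 \0
*
* so pq_getmessage() can read the length word and body regardless of type.
*/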
if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
{
if (pq_getmessage(inBuf, 0))
return EOF; /* suitable message already logged */
}
else
pq_endmsgread();
RESUME_CANCEL_INTERRUPTS();
return qtype;
}
/* ----------------
* ReadCommand reads a command from either the frontend or
* standard input, places it in inBuf, and returns the
* message type code (first byte of the message).
* EOF is returned if end of file is reached.
* ----------------
*/
static int
ReadCommand(StringInfo inBuf)
{
int result;
if (whereToSendOutput == DestRemote)
result = SocketBackend(inBuf);
else
result = InteractiveBackend(inBuf);
return result;
}
/*
* ProcessClientReadInterrupt() - Process interrupts specific to client reads
*
* This is called just after low-level reads. That might be after the read
* finished successfully, or after it was interrupted.
*
* Must preserve errno!
*/
void
ProcessClientReadInterrupt(bool blocked)
{
int save_errno = errno;
if (DoingCommandRead)
{
/* Check for general interrupts that arrived while reading */
CHECK_FOR_INTERRUPTS();
/* Process sinval catchup interrupts that happened while reading */
if (catchupInterruptPending)
ProcessCatchupInterrupt();
/* Process notify interrupts that happened while reading */
if (notifyInterruptPending)
ProcessNotifyInterrupt();
}
else if (ProcDiePending && blocked)
{
/*
* We're dying. It's safe (and sane) to handle that now.
*/
CHECK_FOR_INTERRUPTS();
}
errno = save_errno;
}
/*
* ProcessClientWriteInterrupt() - Process interrupts specific to client writes
*
 * This is called just after low-level writes. That might be after the write
 * finished successfully, or it was interrupted. 'blocked' tells us whether
 * the write was blocked on the socket when the interrupt arrived.
*
* Must preserve errno!
*/
void
ProcessClientWriteInterrupt(bool blocked)
{
int save_errno = errno;
/*
 * We only want to process the interrupt here if socket writes are
 * blocking; that improves the chance of getting an error message out to
 * the client. If we're not blocked there'll soon be a
 * CHECK_FOR_INTERRUPTS(). But if we're blocked we'll never get out of
 * that situation if the client has died.
*/
if (ProcDiePending && blocked)
{
/*
 * We're dying. It's safe (and sane) to handle that now. But we don't
 * want to send the error message to the client, since that a) might
 * block again and b) could interleave the error with a message we had
 * already started to send.
*/
if (whereToSendOutput == DestRemote)
whereToSendOutput = DestNone;
CHECK_FOR_INTERRUPTS();
}
errno = save_errno;
}
/*
* Do raw parsing (only).
*
* A list of parsetrees is returned, since there might be multiple
* commands in the given string.
*
* NOTE: for interactive queries, it is important to keep this routine
* separate from the analysis & rewrite stages. Analysis and rewriting
* cannot be done in an aborted transaction, since they require access to
* database tables. So, we rely on the raw parser to determine whether
* we've seen a COMMIT or ABORT command; when we are in abort state, other
* commands are not processed any further than the raw parse stage.
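 *
 * As an illustrative example (table name hypothetical), the string
 * "SELECT 1; UPDATE foo SET x = 0" yields a two-element list, one raw
 * parsetree per SQL command.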
*/
List *
pg_parse_query(const char *query_string)
{
List *raw_parsetree_list;
TRACE_POSTGRESQL_QUERY_PARSE_START(query_string);
if (log_parser_stats)
ResetUsage();
raw_parsetree_list = raw_parser(query_string);
if (log_parser_stats)
ShowUsage("PARSER STATISTICS");
#ifdef COPY_PARSE_PLAN_TREES
/* Optional debugging check: pass raw parsetrees through copyObject() */
{
List *new_list = (List *) copyObject(raw_parsetree_list);
/* This checks both copyObject() and the equal() routines... */
if (!equal(new_list, raw_parsetree_list))
elog(WARNING, "copyObject() failed to produce an equal raw parse tree");
else
raw_parsetree_list = new_list;
}
#endif
TRACE_POSTGRESQL_QUERY_PARSE_DONE(query_string);
return raw_parsetree_list;
}
/*
* Given a raw parsetree (gram.y output), and optionally information about
* types of parameter symbols ($n), perform parse analysis and rule rewriting.
*
* A list of Query nodes is returned, since either the analyzer or the
* rewriter might expand one query to several.
*
* NOTE: for reasons mentioned above, this must be separate from raw parsing.
*/
List *
pg_analyze_and_rewrite(Node *parsetree, const char *query_string,
Oid *paramTypes, int numParams)
{
Query *query;
List *querytree_list;
TRACE_POSTGRESQL_QUERY_REWRITE_START(query_string);
/*
* (1) Perform parse analysis.
*/
if (log_parser_stats)
ResetUsage();
query = parse_analyze(parsetree, query_string, paramTypes, numParams);
if (log_parser_stats)
ShowUsage("PARSE ANALYSIS STATISTICS");
/*
* (2) Rewrite the queries, as necessary
*/
querytree_list = pg_rewrite_query(query);
TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string);
return querytree_list;
}
/*
* Do parse analysis and rewriting. This is the same as pg_analyze_and_rewrite
* except that external-parameter resolution is determined by parser callback
* hooks instead of a fixed list of parameter datatypes.
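 *
 * This path is used by callers that want to resolve parameter references
 * themselves; PL/pgSQL, for instance, installs its own parser hooks so
 * that references are resolved against its local variables.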
*/
List *
pg_analyze_and_rewrite_params(Node *parsetree,
const char *query_string,
ParserSetupHook parserSetup,
void *parserSetupArg)
{
ParseState *pstate;
Query *query;
List *querytree_list;
Assert(query_string != NULL); /* required as of 8.4 */
TRACE_POSTGRESQL_QUERY_REWRITE_START(query_string);
/*
* (1) Perform parse analysis.
*/
if (log_parser_stats)
ResetUsage();
pstate = make_parsestate(NULL);
pstate->p_sourcetext = query_string;
(*parserSetup) (pstate, parserSetupArg);
query = transformTopLevelStmt(pstate, parsetree);
if (post_parse_analyze_hook)
(*post_parse_analyze_hook) (pstate, query);
free_parsestate(pstate);
if (log_parser_stats)
ShowUsage("PARSE ANALYSIS STATISTICS");
/*
* (2) Rewrite the queries, as necessary
*/
querytree_list = pg_rewrite_query(query);
TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string);
return querytree_list;
}
/*
* Perform rewriting of a query produced by parse analysis.
*
* Note: query must just have come from the parser, because we do not do
* AcquireRewriteLocks() on it.
*/
static List *
pg_rewrite_query(Query *query)
{
List *querytree_list;
if (Debug_print_parse)
elog_node_display(LOG, "parse tree", query,
Debug_pretty_print);
if (log_parser_stats)
ResetUsage();
if (query->commandType == CMD_UTILITY)
{
/* don't rewrite utilities, just dump 'em into result list */
querytree_list = list_make1(query);
}
else
{
/* rewrite regular queries */
querytree_list = QueryRewrite(query);
}
if (log_parser_stats)
ShowUsage("REWRITER STATISTICS");
#ifdef COPY_PARSE_PLAN_TREES
/* Optional debugging check: pass querytree output through copyObject() */
{
List *new_list;
new_list = (List *) copyObject(querytree_list);
/* This checks both copyObject() and the equal() routines... */
if (!equal(new_list, querytree_list))
elog(WARNING, "copyObject() failed to produce equal parse tree");
else
querytree_list = new_list;
}
#endif
if (Debug_print_rewritten)
elog_node_display(LOG, "rewritten parse tree", querytree_list,
Debug_pretty_print);
return querytree_list;
}
/*
* Generate a plan for a single already-rewritten query.
* This is a thin wrapper around planner() and takes the same parameters.
*/
PlannedStmt *
pg_plan_query(Query *querytree, int cursorOptions, ParamListInfo boundParams)
{
PlannedStmt *plan;
/* Utility commands have no plans. */
if (querytree->commandType == CMD_UTILITY)
return NULL;
/* Planner must have a snapshot in case it calls user-defined functions. */
Assert(ActiveSnapshotSet());
TRACE_POSTGRESQL_QUERY_PLAN_START();
if (log_planner_stats)
ResetUsage();
/* call the optimizer */
plan = planner(querytree, cursorOptions, boundParams);
if (log_planner_stats)
ShowUsage("PLANNER STATISTICS");
#ifdef COPY_PARSE_PLAN_TREES
/* Optional debugging check: pass plan output through copyObject() */
{
PlannedStmt *new_plan = (PlannedStmt *) copyObject(plan);
/*
* equal() currently does not have routines to compare Plan nodes, so
* don't try to test equality here. Perhaps fix someday?
*/
#ifdef NOT_USED
/* This checks both copyObject() and the equal() routines... */
if (!equal(new_plan, plan))
elog(WARNING, "copyObject() failed to produce an equal plan tree");
else
#endif
plan = new_plan;
}
#endif
/*
* Print plan if debugging.
*/
if (Debug_print_plan)
elog_node_display(LOG, "plan", plan, Debug_pretty_print);
TRACE_POSTGRESQL_QUERY_PLAN_DONE();
return plan;
}
/*
* Generate plans for a list of already-rewritten queries.
*
* Normal optimizable statements generate PlannedStmt entries in the result
* list. Utility statements are simply represented by their statement nodes.
*/
List *
pg_plan_queries(List *querytrees, int cursorOptions, ParamListInfo boundParams)
{
List *stmt_list = NIL;
ListCell *query_list;
foreach(query_list, querytrees)
{
Query *query = (Query *) lfirst(query_list);
Node *stmt;
if (query->commandType == CMD_UTILITY)
{
/* Utility commands have no plans. */
stmt = query->utilityStmt;
}
else
{
stmt = (Node *) pg_plan_query(query, cursorOptions, boundParams);
}
stmt_list = lappend(stmt_list, stmt);
}
return stmt_list;
}
/*
* exec_simple_query
*
* Execute a "simple Query" protocol message.
*/
static void
exec_simple_query(const char *query_string)
{
CommandDest dest = whereToSendOutput;
MemoryContext oldcontext;
List *parsetree_list;
ListCell *parsetree_item;
bool save_log_statement_stats = log_statement_stats;
bool was_logged = false;
bool isTopLevel;
char msec_str[32];
/*
* Report query to various monitoring facilities.
*/
debug_query_string = query_string;
pgstat_report_activity(STATE_RUNNING, query_string);
TRACE_POSTGRESQL_QUERY_START(query_string);
/*
* We use save_log_statement_stats so ShowUsage doesn't report incorrect
* results because ResetUsage wasn't called.
*/
if (save_log_statement_stats)
ResetUsage();
/*
* Start up a transaction command. All queries generated by the
* query_string will be in this same command block, *unless* we find a
* BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
* one of those, else bad things will happen in xact.c. (Note that this
* will normally change current memory context.)
*/
start_xact_command();
/*
* Zap any pre-existing unnamed statement. (While not strictly necessary,
* it seems best to define simple-Query mode as if it used the unnamed
* statement and portal; this ensures we recover any storage used by prior
* unnamed operations.)
*/
drop_unnamed_stmt();
/*
* Switch to appropriate context for constructing parsetrees.
*/
oldcontext = MemoryContextSwitchTo(MessageContext);
/*
* Do basic parsing of the query or queries (this should be safe even if
* we are in aborted transaction state!)
*/
parsetree_list = pg_parse_query(query_string);
/* Log immediately if dictated by log_statement */
if (check_log_statement(parsetree_list))
{
ereport(LOG,
(errmsg("statement: %s", query_string),
errhidestmt(true),
errdetail_execute(parsetree_list)));
was_logged = true;
}
/*
* Switch back to transaction context to enter the loop.
*/
MemoryContextSwitchTo(oldcontext);
/*
* We'll tell PortalRun it's a top-level command iff there's exactly one
* raw parsetree. If more than one, it's effectively a transaction block
* and we want PreventTransactionChain to reject unsafe commands. (Note:
* we're assuming that query rewrite cannot add commands that are
* significant to PreventTransactionChain.)
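 *
 * Illustrative example: a single simple-Query message containing
 * "VACUUM; SELECT 1" arrives as two parsetrees, so isTopLevel is false and
 * PreventTransactionChain rejects the VACUUM.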
*/
isTopLevel = (list_length(parsetree_list) == 1);
/*
* Run through the raw parsetree(s) and process each one.
*/
foreach(parsetree_item, parsetree_list)
{
Node *parsetree = (Node *) lfirst(parsetree_item);
bool snapshot_set = false;
const char *commandTag;
char completionTag[COMPLETION_TAG_BUFSIZE];
List *querytree_list,
*plantree_list;
Portal portal;
DestReceiver *receiver;
int16 format;
/*
* Get the command name for use in status display (it also becomes the
* default completion tag, down inside PortalRun). Set ps_status and
* do any special start-of-SQL-command processing needed by the
* destination.
*/
commandTag = CreateCommandTag(parsetree);
set_ps_display(commandTag, false);
BeginCommand(commandTag, dest);
/*
* If we are in an aborted transaction, reject all commands except
* COMMIT/ABORT. It is important that this test occur before we try
* to do parse analysis, rewrite, or planning, since all those phases
* try to do database accesses, which may fail in abort state. (It
* might be safe to allow some additional utility commands in this
* state, but not many...)
*/
if (IsAbortedTransactionBlockState() &&
!IsTransactionExitStmt(parsetree))
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
"commands ignored until end of transaction block"),
errdetail_abort()));
/* Make sure we are in a transaction command */
start_xact_command();
/* If we got a cancel signal in parsing or prior command, quit */
CHECK_FOR_INTERRUPTS();
/*
* Set up a snapshot if parse analysis/planning will need one.
*/
if (analyze_requires_snapshot(parsetree))
{
PushActiveSnapshot(GetTransactionSnapshot());
snapshot_set = true;
}
/*
* OK to analyze, rewrite, and plan this query.
*
* Switch to appropriate context for constructing querytrees (again,
* these must outlive the execution context).
*/
oldcontext = MemoryContextSwitchTo(MessageContext);
querytree_list = pg_analyze_and_rewrite(parsetree, query_string,
NULL, 0);
plantree_list = pg_plan_queries(querytree_list,
CURSOR_OPT_PARALLEL_OK, NULL);
/* Done with the snapshot used for parsing/planning */
if (snapshot_set)
PopActiveSnapshot();
/* If we got a cancel signal in analysis or planning, quit */
CHECK_FOR_INTERRUPTS();
/*
* Create unnamed portal to run the query or queries in. If there
* already is one, silently drop it.
*/
portal = CreatePortal("", true, true);
/* Don't display the portal in pg_cursors */
portal->visible = false;
/*
* We don't have to copy anything into the portal, because everything
* we are passing here is in MessageContext, which will outlive the
* portal anyway.
*/
PortalDefineQuery(portal,
NULL,
query_string,
commandTag,
plantree_list,
NULL);
/*
* Start the portal. No parameters here.
*/
PortalStart(portal, NULL, 0, InvalidSnapshot);
/*
* Select the appropriate output format: text unless we are doing a
* FETCH from a binary cursor. (Pretty grotty to have to do this here
* --- but it avoids grottiness in other places. Ah, the joys of
* backward compatibility...)
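 *
 * Illustrative example: after "DECLARE c BINARY CURSOR FOR SELECT 1"
 * (inside a transaction block), a later "FETCH ALL FROM c" issued as a
 * simple query has its rows sent in binary format.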
*/
format = 0; /* TEXT is default */
if (IsA(parsetree, FetchStmt))
{
FetchStmt *stmt = (FetchStmt *) parsetree;
if (!stmt->ismove)
{
Portal fportal = GetPortalByName(stmt->portalname);
if (PortalIsValid(fportal) &&
(fportal->cursorOptions & CURSOR_OPT_BINARY))
format = 1; /* BINARY */
}
}
PortalSetResultFormat(portal, 1, &format);
/*
* Now we can create the destination receiver object.
*/
receiver = CreateDestReceiver(dest);
if (dest == DestRemote)
SetRemoteDestReceiverParams(receiver, portal);
/*
* Switch back to transaction context for execution.
*/
MemoryContextSwitchTo(oldcontext);
/*
* Run the portal to completion, and then drop it (and the receiver).
*/
(void) PortalRun(portal,
FETCH_ALL,
isTopLevel,
receiver,
receiver,
completionTag);
(*receiver->rDestroy) (receiver);
PortalDrop(portal, false);
if (IsA(parsetree, TransactionStmt))
{
/*
* If this was a transaction control statement, commit it. We will
* start a new xact command for the next command (if any).
*/
finish_xact_command();
}
else if (lnext(parsetree_item) == NULL)
{
/*
* If this is the last parsetree of the query string, close down
* transaction statement before reporting command-complete. This
* is so that any end-of-transaction errors are reported before
* the command-complete message is issued, to avoid confusing
* clients who will expect either a command-complete message or an
* error, not one and then the other. But for compatibility with
* historical Postgres behavior, we do not force a transaction
* boundary between queries appearing in a single query string.
*/
finish_xact_command();
}
else
{
/*
* We need a CommandCounterIncrement after every query, except
* those that start or end a transaction block.
*/
CommandCounterIncrement();
}
/*
* Tell client that we're done with this query. Note we emit exactly
* one EndCommand report for each raw parsetree, thus one for each SQL
* command the client sent, regardless of rewriting. (But a command
* aborted by error will not send an EndCommand report at all.)
*/
EndCommand(completionTag, dest);
} /* end loop over parsetrees */
/*
* Close down transaction statement, if one is open.
*/
finish_xact_command();
/*
* If there were no parsetrees, return EmptyQueryResponse message.
*/
if (!parsetree_list)
NullCommand(dest);
/*
* Emit duration logging if appropriate.
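 *
 * check_log_duration() returns 0 for no logging, 1 to log just the
 * duration, or 2 to log the duration along with the statement text.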
*/
switch (check_log_duration(msec_str, was_logged))
{
case 1:
ereport(LOG,
(errmsg("duration: %s ms", msec_str),
errhidestmt(true)));
break;
case 2:
ereport(LOG,
(errmsg("duration: %s ms statement: %s",
msec_str, query_string),
errhidestmt(true),
errdetail_execute(parsetree_list)));
break;
}
if (save_log_statement_stats)
ShowUsage("QUERY STATISTICS");
TRACE_POSTGRESQL_QUERY_DONE(query_string);
debug_query_string = NULL;
}
/*
* exec_parse_message
*
* Execute a "Parse" protocol message.
*/
static void
exec_parse_message(const char *query_string, /* string to execute */
const char *stmt_name, /* name for prepared stmt */
Oid *paramTypes, /* parameter types */
int numParams) /* number of parameters */
{
MemoryContext unnamed_stmt_context = NULL;
MemoryContext oldcontext;
List *parsetree_list;
Node *raw_parse_tree;
const char *commandTag;
List *querytree_list;
CachedPlanSource *psrc;
bool is_named;
bool save_log_statement_stats = log_statement_stats;
char msec_str[32];
/*
* Report query to various monitoring facilities.
*/
debug_query_string = query_string;
pgstat_report_activity(STATE_RUNNING, query_string);
set_ps_display("PARSE", false);
if (save_log_statement_stats)
ResetUsage();
ereport(DEBUG2,
(errmsg("parse %s: %s",
*stmt_name ? stmt_name : "<unnamed>",
query_string)));
/*
* Start up a transaction command so we can run parse analysis etc. (Note
* that this will normally change current memory context.) Nothing happens
* if we are already in one.
*/
start_xact_command();
/*
* Switch to appropriate context for constructing parsetrees.
*
* We have two strategies depending on whether the prepared statement is
* named or not. For a named prepared statement, we do parsing in
* MessageContext and copy the finished trees into the prepared
* statement's plancache entry; then the reset of MessageContext releases
* temporary space used by parsing and rewriting. For an unnamed prepared
* statement, we assume the statement isn't going to hang around long, so
* getting rid of temp space quickly is probably not worth the costs of
* copying parse trees. So in this case, we create the plancache entry's
* query_context here, and do all the parsing work therein.
*/
is_named = (stmt_name[0] != '\0');
if (is_named)
{
/* Named prepared statement --- parse in MessageContext */
oldcontext = MemoryContextSwitchTo(MessageContext);
}
else
{
/* Unnamed prepared statement --- release any prior unnamed stmt */
drop_unnamed_stmt();
/* Create context for parsing */
unnamed_stmt_context =
AllocSetContextCreate(MessageContext,
"unnamed prepared statement",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
oldcontext = MemoryContextSwitchTo(unnamed_stmt_context);
}
/*
* Do basic parsing of the query or queries (this should be safe even if
* we are in aborted transaction state!)
*/
parsetree_list = pg_parse_query(query_string);
/*
* We only allow a single user statement in a prepared statement. This is
* mainly to keep the protocol simple --- otherwise we'd need to worry
* about multiple result tupdescs and things like that.
*/
if (list_length(parsetree_list) > 1)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("cannot insert multiple commands into a prepared statement")));
if (parsetree_list != NIL)
{
Query *query;
bool snapshot_set = false;
int i;
raw_parse_tree = (Node *) linitial(parsetree_list);
/*
* Get the command name for possible use in status display.
*/
commandTag = CreateCommandTag(raw_parse_tree);
/*
* If we are in an aborted transaction, reject all commands except
* COMMIT/ROLLBACK. It is important that this test occur before we
* try to do parse analysis, rewrite, or planning, since all those
* phases try to do database accesses, which may fail in abort state.
* (It might be safe to allow some additional utility commands in this
* state, but not many...)
*/
if (IsAbortedTransactionBlockState() &&
!IsTransactionExitStmt(raw_parse_tree))
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
"commands ignored until end of transaction block"),
errdetail_abort()));
/*
* Create the CachedPlanSource before we do parse analysis, since it
* needs to see the unmodified raw parse tree.
*/
psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
/*
* Set up a snapshot if parse analysis will need one.
*/
if (analyze_requires_snapshot(raw_parse_tree))
{
PushActiveSnapshot(GetTransactionSnapshot());
snapshot_set = true;
}
/*
* Analyze and rewrite the query. Note that the originally specified
* parameter set is not required to be complete, so we have to use
* parse_analyze_varparams().
*/
if (log_parser_stats)
ResetUsage();
query = parse_analyze_varparams(raw_parse_tree,
query_string,
&paramTypes,
&numParams);
/*
* Check all parameter types got determined.
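 *
 * For example (illustrative), parsing "SELECT $1" with no cast and no
 * client-supplied type leaves the parameter's type undetermined, so it is
 * rejected here.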
*/
for (i = 0; i < numParams; i++)
{
Oid ptype = paramTypes[i];
if (ptype == InvalidOid || ptype == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_DATATYPE),
errmsg("could not determine data type of parameter $%d",
i + 1)));
}
if (log_parser_stats)
ShowUsage("PARSE ANALYSIS STATISTICS");
querytree_list = pg_rewrite_query(query);
/* Done with the snapshot used for parsing */
if (snapshot_set)
PopActiveSnapshot();
}
else
{
/* Empty input string. This is legal. */
raw_parse_tree = NULL;
commandTag = NULL;
psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
querytree_list = NIL;
}
/*
* CachedPlanSource must be a direct child of MessageContext before we
* reparent unnamed_stmt_context under it, else we have a disconnected
* circular subgraph. Klugy, but less so than flipping contexts even more
* above.
*/
if (unnamed_stmt_context)
MemoryContextSetParent(psrc->context, MessageContext);
/* Finish filling in the CachedPlanSource */
CompleteCachedPlan(psrc,
querytree_list,
unnamed_stmt_context,
paramTypes,
numParams,
NULL,
NULL,
CURSOR_OPT_PARALLEL_OK, /* allow parallel mode */
true); /* fixed result */
/* If we got a cancel signal during analysis, quit */
CHECK_FOR_INTERRUPTS();
if (is_named)
{
/*
* Store the query as a prepared statement.
*/
StorePreparedStatement(stmt_name, psrc, false);
}
else
{
/*
* We just save the CachedPlanSource into unnamed_stmt_psrc.
*/
SaveCachedPlan(psrc);
unnamed_stmt_psrc = psrc;
}
MemoryContextSwitchTo(oldcontext);
/*
* We do NOT close the open transaction command here; that only happens
* when the client sends Sync. Instead, do CommandCounterIncrement just
* in case something happened during parse/plan.
*/
CommandCounterIncrement();
/*
* Send ParseComplete.
*/
if (whereToSendOutput == DestRemote)
pq_putemptymessage('1');
/*
* Emit duration logging if appropriate.
*/
switch (check_log_duration(msec_str, false))
{
case 1:
ereport(LOG,
(errmsg("duration: %s ms", msec_str),
errhidestmt(true)));
break;
case 2:
ereport(LOG,
(errmsg("duration: %s ms parse %s: %s",
msec_str,
*stmt_name ? stmt_name : "<unnamed>",
query_string),
errhidestmt(true)));
break;
}
if (save_log_statement_stats)
ShowUsage("PARSE MESSAGE STATISTICS");
debug_query_string = NULL;
}
/*
* exec_bind_message
*
* Process a "Bind" message to create a portal from a prepared statement
*/
static void
exec_bind_message(StringInfo input_message)
{
const char *portal_name;
const char *stmt_name;
int numPFormats;
int16 *pformats = NULL;
int numParams;
int numRFormats;
int16 *rformats = NULL;
CachedPlanSource *psrc;
CachedPlan *cplan;
Portal portal;
char *query_string;
char *saved_stmt_name;
ParamListInfo params;
MemoryContext oldContext;
bool save_log_statement_stats = log_statement_stats;
bool snapshot_set = false;
char msec_str[32];
/* Get the fixed part of the message */
portal_name = pq_getmsgstring(input_message);
stmt_name = pq_getmsgstring(input_message);
ereport(DEBUG2,
(errmsg("bind %s to %s",
*portal_name ? portal_name : "<unnamed>",
*stmt_name ? stmt_name : "<unnamed>")));
/* Find prepared statement */
if (stmt_name[0] != '\0')
{
PreparedStatement *pstmt;
pstmt = FetchPreparedStatement(stmt_name, true);
psrc = pstmt->plansource;
}
else
{
/* special-case the unnamed statement */
psrc = unnamed_stmt_psrc;
if (!psrc)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
errmsg("unnamed prepared statement does not exist")));
}
/*
* Report query to various monitoring facilities.
*/
debug_query_string = psrc->query_string;
pgstat_report_activity(STATE_RUNNING, psrc->query_string);
set_ps_display("BIND", false);
if (save_log_statement_stats)
ResetUsage();
/*
* Start up a transaction command so we can call functions etc. (Note that
* this will normally change current memory context.) Nothing happens if
* we are already in one.
*/
start_xact_command();
/* Switch back to message context */
MemoryContextSwitchTo(MessageContext);
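/*
 * Reader's sketch (taken from the frontend/backend protocol documentation,
 * not anything specific to this routine) of the remainder of the Bind ('B')
 * message parsed below:
 *
 *   Int16    number of parameter format codes (0, 1, or one per parameter)
 *   Int16[]  parameter format codes (0 = text, 1 = binary)
 *   Int16    number of parameter values
 *   per parameter: Int32 length (-1 means NULL), then that many data bytes
 *   Int16    number of result-column format codes
 *   Int16[]  result-column format codes
 */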
/* Get the parameter format codes */
numPFormats = pq_getmsgint(input_message, 2);
if (numPFormats > 0)
{
int i;
pformats = (int16 *) palloc(numPFormats * sizeof(int16));
for (i = 0; i < numPFormats; i++)
pformats[i] = pq_getmsgint(input_message, 2);
}
/* Get the parameter value count */
numParams = pq_getmsgint(input_message, 2);
if (numPFormats > 1 && numPFormats != numParams)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("bind message has %d parameter formats but %d parameters",
numPFormats, numParams)));
if (numParams != psrc->num_params)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("bind message supplies %d parameters, but prepared statement \"%s\" requires %d",
numParams, stmt_name, psrc->num_params)));
/*
* If we are in aborted transaction state, the only portals we can
* actually run are those containing COMMIT or ROLLBACK commands. We
* disallow binding anything else to avoid problems with infrastructure
* that expects to run inside a valid transaction. We also disallow
* binding any parameters, since we can't risk calling user-defined I/O
* functions.
*/
if (IsAbortedTransactionBlockState() &&
(!IsTransactionExitStmt(psrc->raw_parse_tree) ||
numParams != 0))
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
"commands ignored until end of transaction block"),
errdetail_abort()));
/*
* Create the portal. Allow silent replacement of an existing portal only
* if the unnamed portal is specified.
*/
if (portal_name[0] == '\0')
portal = CreatePortal(portal_name, true, true);
else
portal = CreatePortal(portal_name, false, false);
/*
* Prepare to copy stuff into the portal's memory context. We do all this
* copying first, because it could possibly fail (out-of-memory) and we
* don't want a failure to occur between GetCachedPlan and
* PortalDefineQuery; that would result in leaking our plancache refcount.
*/
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
/* Copy the plan's query string into the portal */
query_string = pstrdup(psrc->query_string);
/* Likewise make a copy of the statement name, unless it's unnamed */
if (stmt_name[0])
saved_stmt_name = pstrdup(stmt_name);
else
saved_stmt_name = NULL;
/*
* Set a snapshot if we have parameters to fetch (since the input
* functions might need it) or the query isn't a utility command (and
* hence could require redoing parse analysis and planning). We keep the
* snapshot active till we're done, so that plancache.c doesn't have to
* take new ones.
*/
if (numParams > 0 ||
(psrc->raw_parse_tree &&
analyze_requires_snapshot(psrc->raw_parse_tree)))
{
PushActiveSnapshot(GetTransactionSnapshot());
snapshot_set = true;
}
/*
* Fetch parameters, if any, and store in the portal's memory context.
*/
if (numParams > 0)
{
int paramno;
params = (ParamListInfo) palloc(offsetof(ParamListInfoData, params) +
numParams * sizeof(ParamExternData));
/* we have static list of params, so no hooks needed */
params->paramFetch = NULL;
params->paramFetchArg = NULL;
params->parserSetup = NULL;
params->parserSetupArg = NULL;
params->numParams = numParams;
params->paramMask = NULL;
for (paramno = 0; paramno < numParams; paramno++)
{
Oid ptype = psrc->param_types[paramno];
int32 plength;
Datum pval;
bool isNull;
StringInfoData pbuf;
char csave;
int16 pformat;
plength = pq_getmsgint(input_message, 4);
isNull = (plength == -1);
if (!isNull)
{
const char *pvalue = pq_getmsgbytes(input_message, plength);
/*
* Rather than copying data around, we just set up a phony
* StringInfo pointing to the correct portion of the message
* buffer. We assume we can scribble on the message buffer so
* as to maintain the convention that StringInfos have a
* trailing null. This is grotty but is a big win when
* dealing with very large parameter strings.
*/
pbuf.data = (char *) pvalue;
pbuf.maxlen = plength + 1;
pbuf.len = plength;
pbuf.cursor = 0;
csave = pbuf.data[plength];
pbuf.data[plength] = '\0';
}
else
{
pbuf.data = NULL; /* keep compiler quiet */
csave = 0;
}
if (numPFormats > 1)
pformat = pformats[paramno];
else if (numPFormats > 0)
pformat = pformats[0];
else
pformat = 0; /* default = text */
if (pformat == 0) /* text mode */
{
Oid typinput;
Oid typioparam;
char *pstring;
getTypeInputInfo(ptype, &typinput, &typioparam);
/*
* We have to do encoding conversion before calling the
* typinput routine.
*/
if (isNull)
pstring = NULL;
else
pstring = pg_client_to_server(pbuf.data, plength);
pval = OidInputFunctionCall(typinput, pstring, typioparam, -1);
/* Free result of encoding conversion, if any */
if (pstring && pstring != pbuf.data)
pfree(pstring);
}
else if (pformat == 1) /* binary mode */
{
Oid typreceive;
Oid typioparam;
StringInfo bufptr;
/*
* Call the parameter type's binary input converter
*/
getTypeBinaryInputInfo(ptype, &typreceive, &typioparam);
if (isNull)
bufptr = NULL;
else
bufptr = &pbuf;
pval = OidReceiveFunctionCall(typreceive, bufptr, typioparam, -1);
/* Trouble if it didn't eat the whole buffer */
if (!isNull && pbuf.cursor != pbuf.len)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("incorrect binary data format in bind parameter %d",
paramno + 1)));
}
else
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unsupported format code: %d",
pformat)));
pval = 0; /* keep compiler quiet */
}
/* Restore message buffer contents */
if (!isNull)
pbuf.data[plength] = csave;
params->params[paramno].value = pval;
params->params[paramno].isnull = isNull;
/*
* We mark the params as CONST. This ensures that any custom plan
* makes full use of the parameter values.
*/
params->params[paramno].pflags = PARAM_FLAG_CONST;
params->params[paramno].ptype = ptype;
}
}
else
params = NULL;
/* Done storing stuff in portal's context */
MemoryContextSwitchTo(oldContext);
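/*
 * Reader's note: at this point "params" (if any) lives in the portal's
 * memory context, with one ParamExternData per bind parameter, so the
 * values remain valid for as long as the portal does (e.g. across later
 * Execute messages).  Each value is flagged PARAM_FLAG_CONST, per the
 * comment above, so a custom plan can exploit it.
 */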
/* Get the result format codes */
numRFormats = pq_getmsgint(input_message, 2);
if (numRFormats > 0)
{
int i;
rformats = (int16 *) palloc(numRFormats * sizeof(int16));
for (i = 0; i < numRFormats; i++)
rformats[i] = pq_getmsgint(input_message, 2);
}
pq_getmsgend(input_message);
/*
* Obtain a plan from the CachedPlanSource. Any cruft from (re)planning
* will be generated in MessageContext. The plan refcount will be
* assigned to the Portal, so it will be released at portal destruction.
*/
cplan = GetCachedPlan(psrc, params, false);
/*
* Now we can define the portal.
*
* DO NOT put any code that could possibly throw an error between the
* above GetCachedPlan call and here.
*/
PortalDefineQuery(portal,
saved_stmt_name,
query_string,
psrc->commandTag,
cplan->stmt_list,
cplan);
/* Done with the snapshot used for parameter I/O and parsing/planning */
if (snapshot_set)
PopActiveSnapshot();
/*
* And we're ready to start portal execution.
*/
PortalStart(portal, params, 0, InvalidSnapshot);
/*
* Apply the result format requests to the portal.
*/
PortalSetResultFormat(portal, numRFormats, rformats);
/*
* Send BindComplete.
*/
if (whereToSendOutput == DestRemote)
pq_putemptymessage('2');
/*
* Emit duration logging if appropriate.
*/
switch (check_log_duration(msec_str, false))
{
case 1:
ereport(LOG,
(errmsg("duration: %s ms", msec_str),
errhidestmt(true)));
break;
case 2:
ereport(LOG,
(errmsg("duration: %s ms bind %s%s%s: %s",
msec_str,
*stmt_name ? stmt_name : "<unnamed>",
*portal_name ? "/" : "",
*portal_name ? portal_name : "",
psrc->query_string),
errhidestmt(true),
errdetail_params(params)));
break;
}
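/*
 * Illustrative only (adapted from the commit message that introduced this
 * logging, using the format string above): with
 * log_min_duration_statement = 0, the case-2 branch emits something like
 *
 *   LOG:  duration: 2.335 ms bind <unnamed>: SELECT * FROM test1 WHERE t = $1
 *   DETAIL:  parameters: $1 = 'joe''s place'
 */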
if (save_log_statement_stats)
ShowUsage("BIND MESSAGE STATISTICS");
debug_query_string = NULL;
}
/*
* exec_execute_message
*
* Process an "Execute" message for a portal
*/
static void
exec_execute_message(const char *portal_name, long max_rows)
{
CommandDest dest;
DestReceiver *receiver;
Portal portal;
bool completed;
char completionTag[COMPLETION_TAG_BUFSIZE];
const char *sourceText;
const char *prepStmtName;
ParamListInfo portalParams;
bool save_log_statement_stats = log_statement_stats;
bool is_xact_command;
bool execute_is_fetch;
bool was_logged = false;
char msec_str[32];
/* Adjust destination to tell printtup.c what to do */
dest = whereToSendOutput;
if (dest == DestRemote)
dest = DestRemoteExecute;
portal = GetPortalByName(portal_name);
if (!PortalIsValid(portal))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", portal_name)));
/*
* If the original query was a null string, just return
* EmptyQueryResponse.
*/
if (portal->commandTag == NULL)
{
Assert(portal->stmts == NIL);
NullCommand(dest);
return;
}
/* Does the portal contain a transaction command? */
is_xact_command = IsTransactionStmtList(portal->stmts);
/*
* We must copy the sourceText and prepStmtName into MessageContext in
* case the portal is destroyed during finish_xact_command. Can avoid the
* copy if it's not an xact command, though.
*/
if (is_xact_command)
{
sourceText = pstrdup(portal->sourceText);
if (portal->prepStmtName)
prepStmtName = pstrdup(portal->prepStmtName);
else
prepStmtName = "<unnamed>";
/*
* An xact command shouldn't have any parameters, which is a good
* thing because they wouldn't be around after finish_xact_command.
*/
portalParams = NULL;
}
else
{
sourceText = portal->sourceText;
if (portal->prepStmtName)
prepStmtName = portal->prepStmtName;
else
prepStmtName = "<unnamed>";
portalParams = portal->portalParams;
}
/*
* Report query to various monitoring facilities.
*/
debug_query_string = sourceText;
pgstat_report_activity(STATE_RUNNING, sourceText);
set_ps_display(portal->commandTag, false);
if (save_log_statement_stats)
ResetUsage();
BeginCommand(portal->commandTag, dest);
/*
* Create dest receiver in MessageContext (we don't want it in transaction
* context, because that may get deleted if portal contains VACUUM).
*/
receiver = CreateDestReceiver(dest);
if (dest == DestRemoteExecute)
SetRemoteDestReceiverParams(receiver, portal);
/*
* Ensure we are in a transaction command (this should normally be the
* case already due to prior BIND).
*/
start_xact_command();
/*
* If we re-issue an Execute protocol request against an existing portal,
* then we are only fetching more rows rather than completely re-executing
* the query from the start. atStart is never reset for a v3 portal, so we
* are safe to use this check.
*/
execute_is_fetch = !portal->atStart;
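/*
 * Reader's note: execute_is_fetch becomes true when a prior Execute on this
 * portal stopped at a row-count limit (PortalSuspended) and the client is
 * now asking for more rows; such calls are logged below as
 * "execute fetch from" rather than "execute".
 */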
/* Log immediately if dictated by log_statement */
if (check_log_statement(portal->stmts))
{
ereport(LOG,
(errmsg("%s %s%s%s: %s",
execute_is_fetch ?
_("execute fetch from") :
_("execute"),
prepStmtName,
*portal_name ? "/" : "",
*portal_name ? portal_name : "",
sourceText),
errhidestmt(true),
errdetail_params(portalParams)));
was_logged = true;
}
/*
* If we are in aborted transaction state, the only portals we can
* actually run are those containing COMMIT or ROLLBACK commands.
*/
if (IsAbortedTransactionBlockState() &&
!IsTransactionExitStmtList(portal->stmts))
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
"commands ignored until end of transaction block"),
errdetail_abort()));
/* Check for cancel signal before we start execution */
CHECK_FOR_INTERRUPTS();
/*
* Okay to run the portal.
*/
if (max_rows <= 0)
max_rows = FETCH_ALL;
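/*
 * (A row limit of zero in the Execute message means "no limit", which is
 * what the FETCH_ALL mapping above implements.)
 */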
completed = PortalRun(portal,
max_rows,
true, /* always top level */
receiver,
receiver,
completionTag);
(*receiver->rDestroy) (receiver);
if (completed)
{
if (is_xact_command)
{
/*
* If this was a transaction control statement, commit it. We
* will start a new xact command for the next command (if any).
*/
finish_xact_command();
}
else
{
/*
* We need a CommandCounterIncrement after every query, except
* those that start or end a transaction block.
*/
CommandCounterIncrement();
}
/* Send appropriate CommandComplete to client */
EndCommand(completionTag, dest);
}
else
{
/* Portal run not complete, so send PortalSuspended */
if (whereToSendOutput == DestRemote)
pq_putemptymessage('s');
}
/*
* Emit duration logging if appropriate.
*/
switch (check_log_duration(msec_str, was_logged))
{
case 1:
ereport(LOG,
(errmsg("duration: %s ms", msec_str),
errhidestmt(true)));
break;
case 2:
ereport(LOG,
(errmsg("duration: %s ms %s %s%s%s: %s",
msec_str,
execute_is_fetch ?
_("execute fetch from") :
_("execute"),
prepStmtName,
*portal_name ? "/" : "",
*portal_name ? portal_name : "",
sourceText),
errhidestmt(true),
errdetail_params(portalParams)));
break;
}
if (save_log_statement_stats)
ShowUsage("EXECUTE MESSAGE STATISTICS");
debug_query_string = NULL;
}
/*
* check_log_statement
* Determine whether command should be logged because of log_statement
*
* stmt_list can be either raw grammar output or a list of planned
* statements
*/
static bool
check_log_statement(List *stmt_list)
{
ListCell *stmt_item;
if (log_statement == LOGSTMT_NONE)
return false;
if (log_statement == LOGSTMT_ALL)
return true;
/* Else we have to inspect the statement(s) to see whether to log */
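	/*
	 * Illustrative note (ordering assumed from the LogStmtLevel enum in
	 * tcop/tcopprot.h): LOGSTMT_NONE < LOGSTMT_DDL < LOGSTMT_MOD <
	 * LOGSTMT_ALL.  So the "<=" test below logs, for example, a CREATE TABLE
	 * (reported as LOGSTMT_DDL by GetCommandLogLevel) whenever log_statement
	 * is "ddl", "mod", or "all", but not when it is "none".
	 */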
foreach(stmt_item, stmt_list)
{
Node *stmt = (Node *) lfirst(stmt_item);
if (GetCommandLogLevel(stmt) <= log_statement)
return true;
}
return false;
}
/*
* check_log_duration
* Determine whether current command's duration should be logged
*
* Returns:
* 0 if no logging is needed
* 1 if just the duration should be logged
* 2 if duration and query details should be logged
*
* If logging is needed, the duration in msec is formatted into msec_str[],
* which must be a 32-byte buffer.
*
* was_logged should be TRUE if caller already logged query details (this
* essentially prevents 2 from being returned).
*/
int
check_log_duration(char *msec_str, bool was_logged)
{
if (log_duration || log_min_duration_statement >= 0)
{
long secs;
int usecs;
int msecs;
bool exceeded;
TimestampDifference(GetCurrentStatementStartTimestamp(),
GetCurrentTimestamp(),
&secs, &usecs);
msecs = usecs / 1000;
/*
* This odd-looking test for log_min_duration_statement being exceeded
* is designed to avoid integer overflow with very long durations:
* don't compute secs * 1000 until we've verified it will fit in int.
*/
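	/*
	 * Worked example (illustrative only): with log_min_duration_statement =
	 * 2000 ms and an elapsed time of 3 s 125 ms, secs (3) already exceeds
	 * 2000 / 1000 (2), so the test below reports "exceeded" without ever
	 * evaluating secs * 1000.
	 */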
exceeded = (log_min_duration_statement == 0 ||
(log_min_duration_statement > 0 &&
(secs > log_min_duration_statement / 1000 ||
secs * 1000 + msecs >= log_min_duration_statement)));
if (exceeded || log_duration)
{
snprintf(msec_str, 32, "%ld.%03d",
secs * 1000 + msecs, usecs % 1000);
if (exceeded && !was_logged)
return 2;
else
return 1;
}
}
return 0;
}
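/*
 * A minimal sketch of the typical call pattern for check_log_duration()
 * (the exact ereport arguments vary from caller to caller in this file):
 *
 *		char		msec_str[32];
 *
 *		switch (check_log_duration(msec_str, was_logged))
 *		{
 *			case 1:
 *				ereport(LOG,
 *						(errmsg("duration: %s ms", msec_str)));
 *				break;
 *			case 2:
 *				ereport(LOG,
 *						(errmsg("duration: %s ms  statement: %s",
 *								msec_str, query_string)));
 *				break;
 *		}
 */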
/*
* errdetail_execute
*
* Add an errdetail() line showing the query referenced by an EXECUTE, if any.
* The argument is the raw parsetree list.
*/
static int
errdetail_execute(List *raw_parsetree_list)
{
ListCell *parsetree_item;
foreach(parsetree_item, raw_parsetree_list)
{
Node *parsetree = (Node *) lfirst(parsetree_item);
if (IsA(parsetree, ExecuteStmt))
{
ExecuteStmt *stmt = (ExecuteStmt *) parsetree;
PreparedStatement *pstmt;
pstmt = FetchPreparedStatement(stmt->name, false);
if (pstmt)
{
errdetail("prepare: %s", pstmt->plansource->query_string);
return 0;
}
}
}
return 0;
}
/*
* errdetail_params
*
* Add an errdetail() line showing bind-parameter data, if available.
*/
static int
errdetail_params(ParamListInfo params)
{
/* We mustn't call user-defined I/O functions when in an aborted xact */
if (params && params->numParams > 0 && !IsAbortedTransactionBlockState())
{
StringInfoData param_str;
MemoryContext oldcontext;
int paramno;
/* Make sure any trash is generated in MessageContext */
oldcontext = MemoryContextSwitchTo(MessageContext);
initStringInfo(&param_str);
for (paramno = 0; paramno < params->numParams; paramno++)
{
ParamExternData *prm = &params->params[paramno];
Oid typoutput;
bool typisvarlena;
char *pstring;
char *p;
appendStringInfo(&param_str, "%s$%d = ",
paramno > 0 ? ", " : "",
paramno + 1);
if (prm->isnull || !OidIsValid(prm->ptype))
{
appendStringInfoString(&param_str, "NULL");
continue;
}
getTypeOutputInfo(prm->ptype, &typoutput, &typisvarlena);
pstring = OidOutputFunctionCall(typoutput, prm->value);
appendStringInfoCharMacro(&param_str, '\'');
for (p = pstring; *p; p++)
{
if (*p == '\'') /* double single quotes */
appendStringInfoCharMacro(&param_str, *p);
appendStringInfoCharMacro(&param_str, *p);
}
appendStringInfoCharMacro(&param_str, '\'');
pfree(pstring);
}
errdetail("parameters: %s", param_str.data);
pfree(param_str.data);
MemoryContextSwitchTo(oldcontext);
}
return 0;
}
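/*
 * For illustration, binding a text parameter containing a single quote and
 * an integer parameter yields a detail line such as
 *
 *		parameters: $1 = 'joe''s place', $2 = '2'
 *
 * (embedded single quotes are doubled by the loop above).
 */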
/*
* errdetail_abort
*
* Add an errdetail() line showing abort reason, if any.
*/
static int
errdetail_abort(void)
{
if (MyProc->recoveryConflictPending)
errdetail("abort reason: recovery conflict");
return 0;
}
/*
* errdetail_recovery_conflict
*
* Add an errdetail() line showing conflict source.
*/
static int
errdetail_recovery_conflict(void)
{
switch (RecoveryConflictReason)
{
case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:
errdetail("User was holding shared buffer pin for too long.");
break;
case PROCSIG_RECOVERY_CONFLICT_LOCK:
errdetail("User was holding a relation lock for too long.");
break;
case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
errdetail("User was or might have been using tablespace that must be dropped.");
break;
case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:
errdetail("User query might have needed to see row versions that must be removed.");
break;
case PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK:
errdetail("User transaction caused buffer deadlock with recovery.");
break;
case PROCSIG_RECOVERY_CONFLICT_DATABASE:
errdetail("User was connected to a database that must be dropped.");
break;
default:
break;
/* no errdetail */
}
return 0;
}
/*
* exec_describe_statement_message
*
* Process a "Describe" message for a prepared statement
*/
static void
exec_describe_statement_message(const char *stmt_name)
{
CachedPlanSource *psrc;
StringInfoData buf;
int i;
/*
* Start up a transaction command. (Note that this will normally change
* current memory context.) Nothing happens if we are already in one.
*/
start_xact_command();
/* Switch back to message context */
MemoryContextSwitchTo(MessageContext);
/* Find prepared statement */
if (stmt_name[0] != '\0')
{
PreparedStatement *pstmt;
pstmt = FetchPreparedStatement(stmt_name, true);
psrc = pstmt->plansource;
}
else
{
/* special-case the unnamed statement */
psrc = unnamed_stmt_psrc;
if (!psrc)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
errmsg("unnamed prepared statement does not exist")));
}
/* Prepared statements shouldn't have changeable result descs */
Assert(psrc->fixed_result);
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
* Hence, refuse to Describe statements that return data. (We shouldn't
* just refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.) We can Describe parameters
* without doing anything dangerous, so we don't restrict that.
*/
if (IsAbortedTransactionBlockState() &&
psrc->resultDesc)
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
"commands ignored until end of transaction block"),
errdetail_abort()));
if (whereToSendOutput != DestRemote)
return; /* can't actually do anything... */
/*
* First describe the parameters...
*/
pq_beginmessage(&buf, 't'); /* parameter description message type */
pq_sendint(&buf, psrc->num_params, 2);
for (i = 0; i < psrc->num_params; i++)
{
Oid ptype = psrc->param_types[i];
pq_sendint(&buf, (int) ptype, 4);
}
pq_endmessage(&buf);
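	/*
	 * Illustrative wire layout of the ParameterDescription message just
	 * sent, assuming a statement with two parameters of types int4 (OID 23)
	 * and text (OID 25):
	 *
	 *		't' | int32 length | int16 2 | int32 23 | int32 25
	 */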
/*
* Next send RowDescription or NoData to describe the result...
*/
if (psrc->resultDesc)
{
List *tlist;
/* Get the plan's primary targetlist */
tlist = CachedPlanGetTargetList(psrc);
SendRowDescriptionMessage(psrc->resultDesc, tlist, NULL);
}
else
pq_putemptymessage('n'); /* NoData */
}
/*
* exec_describe_portal_message
*
* Process a "Describe" message for a portal
*/
static void
exec_describe_portal_message(const char *portal_name)
{
Portal portal;
/*
* Start up a transaction command. (Note that this will normally change
* current memory context.) Nothing happens if we are already in one.
*/
start_xact_command();
/* Switch back to message context */
MemoryContextSwitchTo(MessageContext);
portal = GetPortalByName(portal_name);
if (!PortalIsValid(portal))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", portal_name)));
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
* Hence, refuse to Describe portals that return data. (We shouldn't just
* refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.)
*/
if (IsAbortedTransactionBlockState() &&
portal->tupDesc)
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
"commands ignored until end of transaction block"),
errdetail_abort()));
if (whereToSendOutput != DestRemote)
return; /* can't actually do anything... */
if (portal->tupDesc)
SendRowDescriptionMessage(portal->tupDesc,
FetchPortalTargetList(portal),
portal->formats);
else
pq_putemptymessage('n'); /* NoData */
}
/*
* Convenience routines for starting/committing a single command.
*/
static void
start_xact_command(void)
{
if (!xact_started)
{
ereport(DEBUG3,
(errmsg_internal("StartTransactionCommand")));
StartTransactionCommand();
/* Set statement timeout running, if any */
/* NB: this mustn't be enabled until we are within an xact */
if (StatementTimeout > 0)
enable_timeout_after(STATEMENT_TIMEOUT, StatementTimeout);
else
disable_timeout(STATEMENT_TIMEOUT, false);
xact_started = true;
}
}
static void
finish_xact_command(void)
{
if (xact_started)
{
/* Cancel any active statement timeout before committing */
disable_timeout(STATEMENT_TIMEOUT, false);
/* Now commit the command */
ereport(DEBUG3,
(errmsg_internal("CommitTransactionCommand")));
CommitTransactionCommand();
#ifdef MEMORY_CONTEXT_CHECKING
/* Check all memory contexts that weren't freed during commit */
/* (those that were, were checked before being deleted) */
MemoryContextCheck(TopMemoryContext);
#endif
#ifdef SHOW_MEMORY_STATS
/* Print mem stats after each commit for leak tracking */
MemoryContextStats(TopMemoryContext);
#endif
xact_started = false;
}
}
/*
* Convenience routines for checking whether a statement is one of the
* ones that we allow in transaction-aborted state.
*/
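/*
 * In SQL terms, the exit statements recognized below are COMMIT, PREPARE
 * TRANSACTION, ROLLBACK/ABORT, and ROLLBACK TO SAVEPOINT, i.e. the commands
 * a client might reasonably issue to get out of a failed transaction block.
 */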
/* Test a bare parsetree */
static bool
IsTransactionExitStmt(Node *parsetree)
{
if (parsetree && IsA(parsetree, TransactionStmt))
{
TransactionStmt *stmt = (TransactionStmt *) parsetree;
if (stmt->kind == TRANS_STMT_COMMIT ||
stmt->kind == TRANS_STMT_PREPARE ||
stmt->kind == TRANS_STMT_ROLLBACK ||
stmt->kind == TRANS_STMT_ROLLBACK_TO)
return true;
}
return false;
}
/* Test a list that might contain Query nodes or bare parsetrees */
static bool
IsTransactionExitStmtList(List *parseTrees)
{
if (list_length(parseTrees) == 1)
{
Node *stmt = (Node *) linitial(parseTrees);
if (IsA(stmt, Query))
{
Query *query = (Query *) stmt;
if (query->commandType == CMD_UTILITY &&
IsTransactionExitStmt(query->utilityStmt))
return true;
}
else if (IsTransactionExitStmt(stmt))
return true;
}
return false;
}
/* Test a list that might contain Query nodes or bare parsetrees */
static bool
IsTransactionStmtList(List *parseTrees)
{
if (list_length(parseTrees) == 1)
{
Node *stmt = (Node *) linitial(parseTrees);
if (IsA(stmt, Query))
{
Query *query = (Query *) stmt;
if (query->commandType == CMD_UTILITY &&
IsA(query->utilityStmt, TransactionStmt))
return true;
}
else if (IsA(stmt, TransactionStmt))
return true;
}
return false;
}
/* Release any existing unnamed prepared statement */
static void
drop_unnamed_stmt(void)
{
/* paranoia to avoid a dangling pointer in case of error */
if (unnamed_stmt_psrc)
{
CachedPlanSource *psrc = unnamed_stmt_psrc;
unnamed_stmt_psrc = NULL;
DropCachedPlan(psrc);
}
}
/* --------------------------------
* signal handler routines used in PostgresMain()
* --------------------------------
*/
/*
* quickdie() occurs when signalled SIGQUIT by the postmaster.
*
* Some backend has bought the farm,
* so we need to stop what we're doing and exit.
*/
void
quickdie(SIGNAL_ARGS)
{
sigaddset(&BlockSig, SIGQUIT); /* prevent nested calls */
PG_SETMASK(&BlockSig);
/*
* Prevent interrupts while exiting; though we just blocked signals that
* would queue new interrupts, one may have been pending. We don't want a
* quickdie() downgraded to a mere query cancel.
*/
HOLD_INTERRUPTS();
/*
* If we're aborting out of client auth, don't risk trying to send
* anything to the client; we will likely violate the protocol, not to
* mention that we may have interrupted the guts of OpenSSL or some
* authentication library.
*/
if (ClientAuthInProgress && whereToSendOutput == DestRemote)
whereToSendOutput = DestNone;
/*
* Ideally this should be ereport(FATAL), but then we'd not get control
* back...
*/
ereport(WARNING,
(errcode(ERRCODE_CRASH_SHUTDOWN),
errmsg("terminating connection because of crash of another server process"),
errdetail("The postmaster has commanded this server process to roll back"
" the current transaction and exit, because another"
" server process exited abnormally and possibly corrupted"
" shared memory."),
errhint("In a moment you should be able to reconnect to the"
" database and repeat your command.")));
/*
* We DO NOT want to run proc_exit() callbacks -- we're here because
* shared memory may be corrupted, so we don't want to try to clean up our
* transaction. Just nail the windows shut and get out of town. Now that
* there's an atexit callback to prevent third-party code from breaking
* things by calling exit() directly, we have to reset the callbacks
* explicitly to make this work as intended.
*/
on_exit_reset();
/*
* Note we do exit(2) not exit(0). This is to force the postmaster into a
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
* should ensure the postmaster sees this as a crash, too, but no harm in
* being doubly sure.)
*/
exit(2);
}
/*
* Shutdown signal from postmaster: abort transaction and exit
* at soonest convenient time
*/
void
die(SIGNAL_ARGS)
{
int save_errno = errno;
/* Don't joggle the elbow of proc_exit */
if (!proc_exit_inprogress)
{
InterruptPending = true;
ProcDiePending = true;
}
/* If we're still here, waken anything waiting on the process latch */
SetLatch(MyLatch);
/*
* If we're in single user mode, we want to quit immediately - we can't
* rely on latches as they wouldn't work when stdin/stdout is a file.
* Rather ugly, but it's unlikely to be worthwhile to invest much more
* effort just for the benefit of single user mode.
*/
if (DoingCommandRead && whereToSendOutput != DestRemote)
ProcessInterrupts();
errno = save_errno;
}
/*
* Query-cancel signal from postmaster: abort current transaction
* at soonest convenient time
*/
void
StatementCancelHandler(SIGNAL_ARGS)
{
int save_errno = errno;
/*
* Don't joggle the elbow of proc_exit
*/
if (!proc_exit_inprogress)
{
InterruptPending = true;
QueryCancelPending = true;
}
/* If we're still here, waken anything waiting on the process latch */
SetLatch(MyLatch);
errno = save_errno;
}
/* signal handler for floating point exception */
void
FloatExceptionHandler(SIGNAL_ARGS)
{
/* We're not returning, so no need to save errno */
ereport(ERROR,
(errcode(ERRCODE_FLOATING_POINT_EXCEPTION),
errmsg("floating-point exception"),
errdetail("An invalid floating-point operation was signaled. "
"This probably means an out-of-range result or an "
"invalid operation, such as division by zero.")));
}
/* SIGHUP: set flag to re-read config file at next convenient time */
static void
SigHupHandler(SIGNAL_ARGS)
{
int save_errno = errno;
got_SIGHUP = true;
SetLatch(MyLatch);
errno = save_errno;
}
/*
* RecoveryConflictInterrupt: out-of-line portion of recovery conflict
* handling following receipt of SIGUSR1. Designed to be similar to die()
* and StatementCancelHandler(). Called only by a normal user backend
* that begins a transaction during recovery.
*/
void
RecoveryConflictInterrupt(ProcSignalReason reason)
{
int save_errno = errno;
/*
* Don't joggle the elbow of proc_exit
*/
if (!proc_exit_inprogress)
{
RecoveryConflictReason = reason;
switch (reason)
{
case PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK:
/*
* If we aren't waiting for a lock we can never deadlock.
*/
if (!IsWaitingForLock())
return;
/* Intentional drop through to check wait for pin */
case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:
/*
* If we aren't blocking the Startup process there is nothing
* more to do.
*/
if (!HoldingBufferPinThatDelaysRecovery())
return;
MyProc->recoveryConflictPending = true;
/* Intentional drop through to error handling */
case PROCSIG_RECOVERY_CONFLICT_LOCK:
case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:
/*
* If we aren't in a transaction any longer then ignore.
*/
if (!IsTransactionOrTransactionBlock())
return;
/*
* If we can abort just the current subtransaction then we are
* OK to throw an ERROR to resolve the conflict. Otherwise
* drop through to the FATAL case.
*
* XXX other times that we can throw just an ERROR *may* be
* PROCSIG_RECOVERY_CONFLICT_LOCK if no locks are held in
* parent transactions
*
* PROCSIG_RECOVERY_CONFLICT_SNAPSHOT if no snapshots are held
* by parent transactions and the transaction is not
* transaction-snapshot mode
*
* PROCSIG_RECOVERY_CONFLICT_TABLESPACE if no temp files or
* cursors open in parent transactions
*/
if (!IsSubTransaction())
{
/*
* If we already aborted then we no longer need to cancel.
* We do this here since we do not wish to ignore aborted
* subtransactions, which must cause FATAL, currently.
*/
if (IsAbortedTransactionBlockState())
return;
RecoveryConflictPending = true;
QueryCancelPending = true;
InterruptPending = true;
break;
}
/* Intentional drop through to session cancel */
case PROCSIG_RECOVERY_CONFLICT_DATABASE:
RecoveryConflictPending = true;
ProcDiePending = true;
InterruptPending = true;
break;
default:
elog(FATAL, "unrecognized conflict mode: %d",
(int) reason);
}
Assert(RecoveryConflictPending && (QueryCancelPending || ProcDiePending));
/*
* All conflicts apart from database cause dynamic errors where the
* command or transaction can be retried at a later point with some
* potential for success. No need to reset this, since non-retryable
* conflict errors are currently FATAL.
*/
if (reason == PROCSIG_RECOVERY_CONFLICT_DATABASE)
RecoveryConflictRetryable = false;
}
/*
* Set the process latch. This function essentially emulates signal
* handlers like die() and StatementCancelHandler() and it seems prudent
* to behave similarly as they do.
*/
SetLatch(MyLatch);
errno = save_errno;
}
/*
* ProcessInterrupts: out-of-line portion of CHECK_FOR_INTERRUPTS() macro
*
* If an interrupt condition is pending, and it's safe to service it,
* then clear the flag and accept the interrupt. Called only when
* InterruptPending is true.
*/
void
ProcessInterrupts(void)
{
/* OK to accept any interrupts now? */
if (InterruptHoldoffCount != 0 || CritSectionCount != 0)
return;
InterruptPending = false;
if (ProcDiePending)
{
ProcDiePending = false;
QueryCancelPending = false; /* ProcDie trumps QueryCancel */
LockErrorCleanup();
/* As in quickdie, don't risk sending to client during auth */
if (ClientAuthInProgress && whereToSendOutput == DestRemote)
whereToSendOutput = DestNone;
if (ClientAuthInProgress)
ereport(FATAL,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("canceling authentication due to timeout")));
else if (IsAutoVacuumWorkerProcess())
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("terminating autovacuum process due to administrator command")));
else if (RecoveryConflictPending && RecoveryConflictRetryable)
{
pgstat_report_recovery_conflict(RecoveryConflictReason);
ereport(FATAL,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("terminating connection due to conflict with recovery"),
errdetail_recovery_conflict()));
}
else if (RecoveryConflictPending)
{
/* Currently there is only one non-retryable recovery conflict */
Assert(RecoveryConflictReason == PROCSIG_RECOVERY_CONFLICT_DATABASE);
pgstat_report_recovery_conflict(RecoveryConflictReason);
ereport(FATAL,
(errcode(ERRCODE_DATABASE_DROPPED),
errmsg("terminating connection due to conflict with recovery"),
errdetail_recovery_conflict()));
}
else
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("terminating connection due to administrator command")));
}
if (ClientConnectionLost)
{
QueryCancelPending = false; /* lost connection trumps QueryCancel */
LockErrorCleanup();
/* don't send to client, we already know the connection to be dead. */
whereToSendOutput = DestNone;
ereport(FATAL,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("connection to client lost")));
}
/*
* If a recovery conflict happens while we are waiting for input from the
* client, the client is presumably just sitting idle in a transaction,
* preventing recovery from making progress. Terminate the connection to
* dislodge it.
*/
if (RecoveryConflictPending && DoingCommandRead)
{
QueryCancelPending = false; /* this trumps QueryCancel */
RecoveryConflictPending = false;
LockErrorCleanup();
pgstat_report_recovery_conflict(RecoveryConflictReason);
ereport(FATAL,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("terminating connection due to conflict with recovery"),
errdetail_recovery_conflict(),
errhint("In a moment you should be able to reconnect to the"
" database and repeat your command.")));
}
if (QueryCancelPending)
{
bool lock_timeout_occurred;
bool stmt_timeout_occurred;
/*
* Don't allow query cancel interrupts while reading input from the
* client, because we might lose sync in the FE/BE protocol. (Die
* interrupts are OK, because we won't read any further messages from
* the client in that case.)
*/
if (QueryCancelHoldoffCount != 0)
{
/*
* Re-arm InterruptPending so that we process the cancel request
* as soon as we're done reading the message.
*/
InterruptPending = true;
return;
}
QueryCancelPending = false;
/*
* If LOCK_TIMEOUT and STATEMENT_TIMEOUT indicators are both set, we
* need to clear both, so always fetch both.
*/
lock_timeout_occurred = get_timeout_indicator(LOCK_TIMEOUT, true);
stmt_timeout_occurred = get_timeout_indicator(STATEMENT_TIMEOUT, true);
/*
* If both were set, we want to report whichever timeout completed
* earlier; this ensures consistent behavior if the machine is slow
* enough that the second timeout triggers before we get here. A tie
* is arbitrarily broken in favor of reporting a lock timeout.
*/
if (lock_timeout_occurred && stmt_timeout_occurred &&
get_timeout_finish_time(STATEMENT_TIMEOUT) < get_timeout_finish_time(LOCK_TIMEOUT))
lock_timeout_occurred = false; /* report stmt timeout */
if (lock_timeout_occurred)
{
LockErrorCleanup();
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("canceling statement due to lock timeout")));
}
if (stmt_timeout_occurred)
{
LockErrorCleanup();
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("canceling statement due to statement timeout")));
}
if (IsAutoVacuumWorkerProcess())
{
LockErrorCleanup();
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("canceling autovacuum task")));
}
if (RecoveryConflictPending)
{
RecoveryConflictPending = false;
LockErrorCleanup();
pgstat_report_recovery_conflict(RecoveryConflictReason);
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("canceling statement due to conflict with recovery"),
errdetail_recovery_conflict()));
}
/*
* If we are reading a command from the client, just ignore the cancel
* request --- sending an extra error message won't accomplish
* anything. Otherwise, go ahead and throw the error.
*/
if (!DoingCommandRead)
{
LockErrorCleanup();
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("canceling statement due to user request")));
}
}
if (IdleInTransactionSessionTimeoutPending)
{
/* Has the timeout setting changed since last we looked? */
if (IdleInTransactionSessionTimeout > 0)
ereport(FATAL,
(errcode(ERRCODE_IDLE_IN_TRANSACTION_SESSION_TIMEOUT),
errmsg("terminating connection due to idle-in-transaction timeout")));
else
IdleInTransactionSessionTimeoutPending = false;
}
if (ParallelMessagePending)
HandleParallelMessages();
}
/*
* IA64-specific code to fetch the AR.BSP register for stack depth checks.
*
* We currently support gcc, icc, and HP-UX's native compiler here.
*
* Note: while icc accepts gcc asm blocks on x86[_64], this is not true on
* ia64 (at least not in icc versions before 12.x). So we have to carry a
* separate implementation for it.
*/
#if defined(__ia64__) || defined(__ia64)
#if defined(__hpux) && !defined(__GNUC__) && !defined(__INTEL_COMPILER)
/* Assume it's HP-UX native compiler */
#include <ia64/sys/inline.h>
#define ia64_get_bsp() ((char *) (_Asm_mov_from_ar(_AREG_BSP, _NO_FENCE)))
#elif defined(__INTEL_COMPILER)
/* icc */
#include <asm/ia64regs.h>
#define ia64_get_bsp() ((char *) __getReg(_IA64_REG_AR_BSP))
#else
/* gcc */
static __inline__ char *
ia64_get_bsp(void)
{
char *ret;
/* the ;; is a "stop", seems to be required before fetching BSP */
__asm__ __volatile__(
";;\n"
" mov %0=ar.bsp \n"
: "=r"(ret));
return ret;
}
#endif
#endif /* IA64 */
/*
* set_stack_base: set up reference point for stack depth checking
*
* Returns the old reference point, if any.
*/
pg_stack_base_t
set_stack_base(void)
{
char stack_base;
pg_stack_base_t old;
#if defined(__ia64__) || defined(__ia64)
old.stack_base_ptr = stack_base_ptr;
old.register_stack_base_ptr = register_stack_base_ptr;
#else
old = stack_base_ptr;
#endif
/* Set up reference point for stack depth checking */
stack_base_ptr = &stack_base;
#if defined(__ia64__) || defined(__ia64)
register_stack_base_ptr = ia64_get_bsp();
#endif
return old;
}
/*
* restore_stack_base: restore reference point for stack depth checking
*
* This can be used after set_stack_base() to restore the old value. This
* is currently only used in PL/Java. When PL/Java calls a backend function
* from different thread, the thread's stack is at a different location than
* the main thread's stack, so it sets the base pointer before the call, and
* restores it afterwards.
*/
void
restore_stack_base(pg_stack_base_t base)
{
#if defined(__ia64__) || defined(__ia64)
stack_base_ptr = base.stack_base_ptr;
register_stack_base_ptr = base.register_stack_base_ptr;
#else
stack_base_ptr = base;
#endif
}
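/*
 * Illustrative save/restore pattern for an out-of-stack caller such as the
 * PL/Java case described above (hypothetical caller, not part of this file):
 *
 *     pg_stack_base_t saved = set_stack_base();
 *
 *     ... invoke backend code from the foreign thread's stack ...
 *
 *     restore_stack_base(saved);
 */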
/*
* check_stack_depth/stack_is_too_deep: check for excessively deep recursion
*
* This should be called someplace in any recursive routine that might possibly
* recurse deep enough to overflow the stack. Most Unixen treat stack
* overflow as an unrecoverable SIGSEGV, so we want to error out ourselves
* before hitting the hardware limit.
*
* check_stack_depth() just throws an error summarily. stack_is_too_deep()
* can be used by code that wants to handle the error condition itself.
*/
void
check_stack_depth(void)
{
if (stack_is_too_deep())
{
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("stack depth limit exceeded"),
errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), "
"after ensuring the platform's stack depth limit is adequate.",
max_stack_depth)));
}
}
bool
stack_is_too_deep(void)
{
char stack_top_loc;
long stack_depth;
/*
* Compute distance from reference point to my local variables
*/
stack_depth = (long) (stack_base_ptr - &stack_top_loc);
/*
* Take abs value, since stacks grow up on some machines, down on others
*/
if (stack_depth < 0)
stack_depth = -stack_depth;
/*
* Trouble?
*
* The test on stack_base_ptr prevents us from erroring out if called
* during process setup or in a non-backend process. Logically it should
* be done first, but putting it here avoids wasting cycles during normal
* cases.
*/
if (stack_depth > max_stack_depth_bytes &&
stack_base_ptr != NULL)
return true;
/*
* On IA64 there is a separate "register" stack that requires its own
* independent check. For this, we have to measure the change in the
* "BSP" pointer from PostgresMain to here. Logic is just as above,
* except that we know IA64's register stack grows up.
*
* Note we assume that the same max_stack_depth applies to both stacks.
*/
#if defined(__ia64__) || defined(__ia64)
stack_depth = (long) (ia64_get_bsp() - register_stack_base_ptr);
if (stack_depth > max_stack_depth_bytes &&
register_stack_base_ptr != NULL)
return true;
#endif /* IA64 */
return false;
}
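/*
 * Usage sketch (hypothetical recursive routine): callers that may recurse
 * to client-controlled depth test the limit on entry, e.g.
 *
 *		static void
 *		walk_tree(Node *node)
 *		{
 *			check_stack_depth();	-- ereports ERROR if too deep
 *			... recurse into child nodes ...
 *		}
 *
 * Code that would rather fall back to a non-recursive strategy than fail
 * can call stack_is_too_deep() directly and handle the condition itself.
 */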
/* GUC check hook for max_stack_depth */
bool
check_max_stack_depth(int *newval, void **extra, GucSource source)
{
long newval_bytes = *newval * 1024L;
long stack_rlimit = get_stack_depth_rlimit();
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.",
(stack_rlimit - STACK_DEPTH_SLOP) / 1024L);
GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent.");
return false;
}
return true;
}
/* GUC assign hook for max_stack_depth */
void
assign_max_stack_depth(int newval, void *extra)
{
long newval_bytes = newval * 1024L;
max_stack_depth_bytes = newval_bytes;
}
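/*
 * Worked example (illustrative figures): if "ulimit -s" reports 8192kB,
 * the check hook above accepts a new max_stack_depth only when
 *
 *		newval * 1024 <= stack_rlimit - STACK_DEPTH_SLOP
 *
 * i.e. anything up to (8192 * 1024 - STACK_DEPTH_SLOP) / 1024 kB; larger
 * settings produce the errdetail/errhint shown above.  The assign hook then
 * caches the accepted value in max_stack_depth_bytes so stack_is_too_deep()
 * need not convert units on every call.
 */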
/*
* set_debug_options --- apply "-d N" command line option
*
* -d is not quite the same as setting log_min_messages because it enables
* other output options.
*/
void
set_debug_options(int debug_flag, GucContext context, GucSource source)
{
if (debug_flag > 0)
{
char debugstr[64];
sprintf(debugstr, "debug%d", debug_flag);
SetConfigOption("log_min_messages", debugstr, context, source);
}
else
SetConfigOption("log_min_messages", "notice", context, source);
if (debug_flag >= 1 && context == PGC_POSTMASTER)
{
SetConfigOption("log_connections", "true", context, source);
SetConfigOption("log_disconnections", "true", context, source);
}
if (debug_flag >= 2)
SetConfigOption("log_statement", "all", context, source);
if (debug_flag >= 3)
SetConfigOption("debug_print_parse", "true", context, source);
if (debug_flag >= 4)
SetConfigOption("debug_print_plan", "true", context, source);
if (debug_flag >= 5)
SetConfigOption("debug_print_rewritten", "true", context, source);
}
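/*
 * For illustration, "-d 3" given to the postmaster is roughly equivalent to
 * configuring
 *
 *		log_min_messages = debug3
 *		log_connections = on
 *		log_disconnections = on
 *		log_statement = 'all'
 *		debug_print_parse = on
 *
 * while "-d 0" just forces log_min_messages back to notice.  (The
 * connection-logging options are applied only at postmaster startup, per the
 * PGC_POSTMASTER test above.)
 */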
bool
set_plan_disabling_options(const char *arg, GucContext context, GucSource source)
{
const char *tmp = NULL;
switch (arg[0])
{
case 's': /* seqscan */
tmp = "enable_seqscan";
break;
case 'i': /* indexscan */
tmp = "enable_indexscan";
break;
case 'o': /* indexonlyscan */
tmp = "enable_indexonlyscan";
break;
case 'b': /* bitmapscan */
tmp = "enable_bitmapscan";
break;
case 't': /* tidscan */
tmp = "enable_tidscan";
break;
case 'n': /* nestloop */
tmp = "enable_nestloop";
break;
case 'm': /* mergejoin */
tmp = "enable_mergejoin";
break;
case 'h': /* hashjoin */
tmp = "enable_hashjoin";
break;
}
if (tmp)
{
SetConfigOption(tmp, "false", context, source);
return true;
}
else
return false;
}
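/*
 * For illustration: these letters arrive via the "-f" switch handled in
 * process_postgres_switches() below, so a command line containing
 *
 *		-fs -fh
 *
 * calls this function with "s" and then "h", turning off enable_seqscan and
 * enable_hashjoin for the session.  An unrecognized letter returns false and
 * is counted as a command-line error by the caller.
 */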
const char *
get_stats_option_name(const char *arg)
{
switch (arg[0])
{
case 'p':
if (arg[1] == 'a') /* "parser" */
return "log_parser_stats";
else if (arg[1] == 'l') /* "planner" */
return "log_planner_stats";
break;
case 'e': /* "executor" */
return "log_executor_stats";
break;
}
return NULL;
}
/* ----------------------------------------------------------------
* process_postgres_switches
* Parse command line arguments for PostgresMain
*
* This is called twice, once for the "secure" options coming from the
* postmaster or command line, and once for the "insecure" options coming
* from the client's startup packet. The latter have the same syntax but
* may be restricted in what they can do.
*
* argv[0] is ignored in either case (it's assumed to be the program name).
*
* ctx is PGC_POSTMASTER for secure options, PGC_BACKEND for insecure options
* coming from the client, or PGC_SU_BACKEND for insecure options coming from
* a superuser client.
*
* If a database name is present in the command line arguments, it's
* returned into *dbname (this is allowed only if *dbname is initially NULL).
* ----------------------------------------------------------------
*/
void
process_postgres_switches(int argc, char *argv[], GucContext ctx,
const char **dbname)
{
bool secure = (ctx == PGC_POSTMASTER);
int errs = 0;
GucSource gucsource;
int flag;
if (secure)
{
gucsource = PGC_S_ARGV; /* switches came from command line */
/* Ignore the initial --single argument, if present */
if (argc > 1 && strcmp(argv[1], "--single") == 0)
{
argv++;
argc--;
}
}
else
{
gucsource = PGC_S_CLIENT; /* switches came from client */
}
#ifdef HAVE_INT_OPTERR
/*
* Turn this off because it's either printed to stderr and not the log
* where we'd want it, or argv[0] is now "--single", which would make for
* a weird error message. We print our own error message below.
*/
opterr = 0;
#endif
/*
* Parse command-line options. CAUTION: keep this in sync with
* postmaster/postmaster.c (the option sets should not conflict) and with
* the common help() function in main/main.c.
*/
while ((flag = getopt(argc, argv, "B:bc:C:D:d:EeFf:h:ijk:lN:nOo:Pp:r:S:sTt:v:W:-:")) != -1)
{
switch (flag)
{
case 'B':
SetConfigOption("shared_buffers", optarg, ctx, gucsource);
break;
case 'b':
/* Undocumented flag used for binary upgrades */
if (secure)
IsBinaryUpgrade = true;
break;
case 'C':
/* ignored for consistency with the postmaster */
break;
case 'D':
if (secure)
userDoption = strdup(optarg);
break;
case 'd':
set_debug_options(atoi(optarg), ctx, gucsource);
break;
case 'E':
if (secure)
EchoQuery = true;
break;
case 'e':
SetConfigOption("datestyle", "euro", ctx, gucsource);
break;
case 'F':
SetConfigOption("fsync", "false", ctx, gucsource);
break;
case 'f':
if (!set_plan_disabling_options(optarg, ctx, gucsource))
errs++;
break;
case 'h':
SetConfigOption("listen_addresses", optarg, ctx, gucsource);
break;
case 'i':
SetConfigOption("listen_addresses", "*", ctx, gucsource);
break;
case 'j':
if (secure)
UseSemiNewlineNewline = true;
break;
case 'k':
SetConfigOption("unix_socket_directories", optarg, ctx, gucsource);
break;
case 'l':
SetConfigOption("ssl", "true", ctx, gucsource);
break;
case 'N':
SetConfigOption("max_connections", optarg, ctx, gucsource);
break;
case 'n':
/* ignored for consistency with postmaster */
break;
case 'O':
SetConfigOption("allow_system_table_mods", "true", ctx, gucsource);
break;
case 'o':
errs++;
break;
case 'P':
SetConfigOption("ignore_system_indexes", "true", ctx, gucsource);
break;
case 'p':
SetConfigOption("port", optarg, ctx, gucsource);
break;
case 'r':
/* send output (stdout and stderr) to the given file */
if (secure)
strlcpy(OutputFileName, optarg, MAXPGPATH);
break;
case 'S':
SetConfigOption("work_mem", optarg, ctx, gucsource);
break;
case 's':
SetConfigOption("log_statement_stats", "true", ctx, gucsource);
break;
case 'T':
/* ignored for consistency with the postmaster */
break;
case 't':
{
const char *tmp = get_stats_option_name(optarg);
if (tmp)
SetConfigOption(tmp, "true", ctx, gucsource);
else
errs++;
break;
}
case 'v':
/*
* -v is no longer used in normal operation, since
* FrontendProtocol is already set before we get here. We keep
* the switch only for possible use in standalone operation,
* in case we ever support using normal FE/BE protocol with a
* standalone backend.
*/
if (secure)
FrontendProtocol = (ProtocolVersion) atoi(optarg);
break;
case 'W':
SetConfigOption("post_auth_delay", optarg, ctx, gucsource);
break;
case 'c':
case '-':
{
char *name,
*value;
ParseLongOption(optarg, &name, &value);
if (!value)
{
if (flag == '-')
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("--%s requires a value",
optarg)));
else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("-c %s requires a value",
optarg)));
}
SetConfigOption(name, value, ctx, gucsource);
free(name);
if (value)
free(value);
break;
}
default:
errs++;
break;
}
if (errs)
break;
}
/*
* Optional database name should be there only if *dbname is NULL.
*/
if (!errs && dbname && *dbname == NULL && argc - optind >= 1)
*dbname = strdup(argv[optind++]);
if (errs || argc != optind)
{
if (errs)
optind--; /* complain about the previous argument */
/* spell the error message a bit differently depending on context */
if (IsUnderPostmaster)
ereport(FATAL,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid command-line argument for server process: %s", argv[optind]),
errhint("Try \"%s --help\" for more information.", progname)));
else
ereport(FATAL,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("%s: invalid command-line argument: %s",
progname, argv[optind]),
errhint("Try \"%s --help\" for more information.", progname)));
}
/*
* Reset getopt(3) library so that it will work correctly in subprocesses
* or when this function is called a second time with another array.
*/
optind = 1;
#ifdef HAVE_INT_OPTRESET
optreset = 1; /* some systems need this too */
#endif
}
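/*
 * For illustration: the two spellings of a GUC assignment,
 *
 *		-c work_mem=64MB
 *		--work-mem=64MB
 *
 * both reach the 'c'/'-' case above; ParseLongOption() splits the string
 * into name "work_mem" (dashes become underscores) and value "64MB", which
 * is then applied with SetConfigOption().  Omitting the "=value" part
 * raises the "requires a value" error.
 */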
/* ----------------------------------------------------------------
* PostgresMain
* postgres main loop -- all backends, interactive or otherwise start here
*
* argc/argv are the command line arguments to be used. (When being forked
* by the postmaster, these are not the original argv array of the process.)
* dbname is the name of the database to connect to, or NULL if the database
* name should be extracted from the command line arguments or defaulted.
* username is the PostgreSQL user name to be used for the session.
* ----------------------------------------------------------------
*/
void
PostgresMain(int argc, char *argv[],
const char *dbname,
const char *username)
{
int firstchar;
StringInfoData input_message;
sigjmp_buf local_sigjmp_buf;
volatile bool send_ready_for_query = true;
bool disable_idle_in_transaction_timeout = false;
/* Initialize startup process environment if necessary. */
if (!IsUnderPostmaster)
InitStandaloneProcess(argv[0]);
SetProcessingMode(InitProcessing);
/*
* Set default values for command-line options.
*/
if (!IsUnderPostmaster)
InitializeGUCOptions();
/*
* Parse command-line options.
*/
process_postgres_switches(argc, argv, PGC_POSTMASTER, &dbname);
/* Must have gotten a database name, or have a default (the username) */
if (dbname == NULL)
{
dbname = username;
if (dbname == NULL)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%s: no database nor user name specified",
progname)));
}
/* Acquire configuration parameters, unless inherited from postmaster */
if (!IsUnderPostmaster)
{
if (!SelectConfigFiles(userDoption, progname))
proc_exit(1);
}
/*
* Set up signal handlers and masks.
*
* Note that postmaster blocked all signals before forking child process,
* so there is no race condition whereby we might receive a signal before
* we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in the
* postmaster. If such a signal arrives before we are able to change the
* handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
* handler in the postmaster to reserve the signal. (Of course, this isn't
* an issue for signals that are locally generated, such as SIGALRM and
* SIGPIPE.)
*/
if (am_walsender)
WalSndSignals();
else
{
pqsignal(SIGHUP, SigHupHandler); /* set flag to read config
* file */
pqsignal(SIGINT, StatementCancelHandler); /* cancel current query */
pqsignal(SIGTERM, die); /* cancel current query and exit */
/*
* In a standalone backend, SIGQUIT can be generated from the keyboard
* easily, while SIGTERM cannot, so we make both signals do die()
* rather than quickdie().
*/
if (IsUnderPostmaster)
pqsignal(SIGQUIT, quickdie); /* hard crash time */
else
pqsignal(SIGQUIT, die); /* cancel current query and exit */
InitializeTimeouts(); /* establishes SIGALRM handler */
/*
* Ignore failure to write to frontend. Note: if frontend closes
* connection, we will notice it and exit cleanly when control next
* returns to outer loop. This seems safer than forcing exit in the
* midst of output during who-knows-what operation...
*/
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, procsignal_sigusr1_handler);
pqsignal(SIGUSR2, SIG_IGN);
pqsignal(SIGFPE, FloatExceptionHandler);
/*
* Reset some signals that are accepted by postmaster but not by
* backend
*/
pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some
* platforms */
}
pqinitmask();
if (IsUnderPostmaster)
{
/* We allow SIGQUIT (quickdie) at all times */
sigdelset(&BlockSig, SIGQUIT);
}
PG_SETMASK(&BlockSig); /* block everything except SIGQUIT */
if (!IsUnderPostmaster)
{
/*
* Validate we have been given a reasonable-looking DataDir (if under
* postmaster, assume postmaster did this already).
*/
Assert(DataDir);
ValidatePgVersion(DataDir);
/* Change into DataDir (if under postmaster, was done already) */
ChangeToDataDir();
/*
* Create lockfile for data directory.
*/
CreateDataDirLockFile(false);
/* Initialize MaxBackends (if under postmaster, was done already) */
InitializeMaxBackends();
}
/* Early initialization */
BaseInit();
/*
* Create a per-backend PGPROC struct in shared memory, except in the
* EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
* this before we can use LWLocks (and in the EXEC_BACKEND case we already
* had to do some stuff with LWLocks).
*/
#ifdef EXEC_BACKEND
if (!IsUnderPostmaster)
InitProcess();
#else
InitProcess();
#endif
/* We need to allow SIGINT, etc during the initial transaction */
PG_SETMASK(&UnBlockSig);
/*
* General initialization.
*
* NOTE: if you are tempted to add code in this vicinity, consider putting
* it inside InitPostgres() instead. In particular, anything that
* involves database access should be there, not here.
*/
InitPostgres(dbname, InvalidOid, username, InvalidOid, NULL);
/*
* If the PostmasterContext is still around, recycle the space; we don't
* need it anymore after InitPostgres completes. Note this does not trash
* *MyProcPort, because ConnCreate() allocated that space with malloc()
* ... else we'd need to copy the Port data first. Also, subsidiary data
* such as the username isn't lost either; see ProcessStartupPacket().
*/
if (PostmasterContext)
{
MemoryContextDelete(PostmasterContext);
PostmasterContext = NULL;
}
SetProcessingMode(NormalProcessing);
/*
* Now all GUC states are fully set up. Report them to client if
* appropriate.
*/
BeginReportingGUCOptions();
/*
* Also set up handler to log session end; we have to wait till now to be
* sure Log_disconnections has its final value.
*/
if (IsUnderPostmaster && Log_disconnections)
on_proc_exit(log_disconnections, 0);
/* Perform initialization specific to a WAL sender process. */
if (am_walsender)
InitWalSender();
/*
* process any libraries that should be preloaded at backend start (this
* likewise can't be done until GUC settings are complete)
*/
process_session_preload_libraries();
/*
* Send this backend's cancellation info to the frontend.
*/
if (whereToSendOutput == DestRemote &&
PG_PROTOCOL_MAJOR(FrontendProtocol) >= 2)
{
StringInfoData buf;
pq_beginmessage(&buf, 'K');
pq_sendint(&buf, (int32) MyProcPid, sizeof(int32));
pq_sendint(&buf, (int32) MyCancelKey, sizeof(int32));
pq_endmessage(&buf);
/* Need not flush since ReadyForQuery will do it. */
}
/* Welcome banner for standalone case */
if (whereToSendOutput == DestDebug)
printf("\nPostgreSQL stand-alone backend %s\n", PG_VERSION);
/*
* Create the memory context we will use in the main loop.
*
* MessageContext is reset once per iteration of the main loop, ie, upon
* completion of processing of each command message from the client.
*/
MessageContext = AllocSetContextCreate(TopMemoryContext,
"MessageContext",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
/*
* Remember stand-alone backend startup time
*/
if (!IsUnderPostmaster)
PgStartTime = GetCurrentTimestamp();
/*
* POSTGRES main processing loop begins here
*
* If an exception is encountered, processing resumes here so we abort the
* current transaction and start a new one.
*
* You might wonder why this isn't coded as an infinite loop around a
* PG_TRY construct. The reason is that this is the bottom of the
* exception stack, and so with PG_TRY there would be no exception handler
* in force at all during the CATCH part. By leaving the outermost setjmp
* always active, we have at least some chance of recovering from an error
* during error recovery. (If we get into an infinite loop thereby, it
* will soon be stopped by overflow of elog.c's internal state stack.)
*
* Note that we use sigsetjmp(..., 1), so that this function's signal mask
* (to wit, UnBlockSig) will be restored when longjmp'ing to here. This
* is essential in case we longjmp'd out of a signal handler on a platform
* where that leaves the signal blocked. It's not redundant with the
* unblock in AbortTransaction() because the latter is only called if we
* were inside a transaction.
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
/*
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
* AbortTransaction() instead. The only stuff done directly here
* should be stuff that is guaranteed to apply *only* for outer-level
* error recovery, such as adjusting the FE/BE protocol status.
*/
/* Since not using PG_TRY, must reset error stack by hand */
error_context_stack = NULL;
/* Prevent interrupts while cleaning up */
HOLD_INTERRUPTS();
/*
* Forget any pending QueryCancel request, since we're returning to
* the idle loop anyway, and cancel any active timeout requests. (In
* future we might want to allow some timeout requests to survive, but
* at minimum it'd be necessary to do reschedule_timeouts(), in case
* we got here because of a query cancel interrupting the SIGALRM
* interrupt handler.) Note in particular that we must clear the
* statement and lock timeout indicators, to prevent any future plain
* query cancels from being misreported as timeouts in case we're
* forgetting a timeout cancel.
*/
disable_all_timeouts(false);
QueryCancelPending = false; /* second to avoid race condition */
/* Not reading from the client anymore. */
DoingCommandRead = false;
/* Make sure libpq is in a good state */
pq_comm_reset();
/* Report the error to the client and/or server log */
EmitErrorReport();
/*
* Make sure debug_query_string gets reset before we possibly clobber
* the storage it points at.
*/
debug_query_string = NULL;
/*
* Abort the current transaction in order to recover.
*/
AbortCurrentTransaction();
if (am_walsender)
WalSndErrorCleanup();
/*
* We can't release replication slots inside AbortTransaction() as we
* need to be able to start and abort transactions while having a slot
* acquired. But we never need to hold them across top level errors,
* so releasing here is fine. There's another cleanup in ProcKill()
* ensuring we'll correctly cleanup on FATAL errors as well.
*/
if (MyReplicationSlot != NULL)
ReplicationSlotRelease();
/*
* Now return to normal top-level context and clear ErrorContext for
* next time.
*/
MemoryContextSwitchTo(TopMemoryContext);
FlushErrorState();
/*
* If we were handling an extended-query-protocol message, initiate
* skip till next Sync. This also causes us not to issue
* ReadyForQuery (until we get Sync).
*/
if (doing_extended_query_message)
ignore_till_sync = true;
/* We don't have a transaction command open anymore */
xact_started = false;
/*
* If an error occurred while we were reading a message from the
* client, we have potentially lost track of where the previous
* message ends and the next one begins. Even though we have
* otherwise recovered from the error, we cannot safely read any more
* messages from the client, so there isn't much we can do with the
* connection anymore.
*/
if (pq_is_reading_msg())
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("terminating connection because protocol synchronization was lost")));
/* Now we can allow interrupts again */
RESUME_INTERRUPTS();
}
/* We can now handle ereport(ERROR) */
PG_exception_stack = &local_sigjmp_buf;
if (!ignore_till_sync)
send_ready_for_query = true; /* initially, or after error */
/*
* Non-error queries loop here.
*/
for (;;)
{
/*
* At top of loop, reset extended-query-message flag, so that any
* errors encountered in "idle" state don't provoke skip.
*/
doing_extended_query_message = false;
/*
* Release storage left over from prior query cycle, and create a new
* query input buffer in the cleared MessageContext.
*/
MemoryContextSwitchTo(MessageContext);
MemoryContextResetAndDeleteChildren(MessageContext);
initStringInfo(&input_message);
/*
* (1) If we've reached idle state, tell the frontend we're ready for
* a new query.
*
* Note: this includes fflush()'ing the last of the prior output.
*
* This is also a good time to send collected statistics to the
* collector, and to update the PS stats display. We avoid doing
* those every time through the message loop because it'd slow down
* processing of batched messages, and because we don't want to report
* uncommitted updates (that confuses autovacuum). The notification
* processor wants a call too, if we are not in a transaction block.
*/
if (send_ready_for_query)
{
if (IsAbortedTransactionBlockState())
{
set_ps_display("idle in transaction (aborted)", false);
pgstat_report_activity(STATE_IDLEINTRANSACTION_ABORTED, NULL);
/* Start the idle-in-transaction timer */
if (IdleInTransactionSessionTimeout > 0)
{
disable_idle_in_transaction_timeout = true;
enable_timeout_after(IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
IdleInTransactionSessionTimeout);
}
}
else if (IsTransactionOrTransactionBlock())
{
set_ps_display("idle in transaction", false);
pgstat_report_activity(STATE_IDLEINTRANSACTION, NULL);
/* Start the idle-in-transaction timer */
if (IdleInTransactionSessionTimeout > 0)
{
disable_idle_in_transaction_timeout = true;
enable_timeout_after(IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
IdleInTransactionSessionTimeout);
}
}
else
{
ProcessCompletedNotifies();
pgstat_report_stat(false);
set_ps_display("idle", false);
pgstat_report_activity(STATE_IDLE, NULL);
}
ReadyForQuery(whereToSendOutput);
send_ready_for_query = false;
}
/*
* (2) Allow asynchronous signals to be executed immediately if they
* come in while we are waiting for client input. (This must be
* conditional since we don't want, say, reads on behalf of COPY FROM
* STDIN doing the same thing.)
*/
DoingCommandRead = true;
/*
* (3) read a command (loop blocks here)
*/
firstchar = ReadCommand(&input_message);
/*
* (4) disable async signal conditions again.
*
* Query cancel is supposed to be a no-op when there is no query in
* progress, so if a query cancel arrived while we were idle, just
* reset QueryCancelPending. ProcessInterrupts() has that effect when
* it's called when DoingCommandRead is set, so check for interrupts
* before resetting DoingCommandRead.
*/
CHECK_FOR_INTERRUPTS();
DoingCommandRead = false;
/*
* (5) turn off the idle-in-transaction timeout
*/
if (disable_idle_in_transaction_timeout)
{
disable_timeout(IDLE_IN_TRANSACTION_SESSION_TIMEOUT, false);
disable_idle_in_transaction_timeout = false;
}
/*
* (6) check for any other interesting events that happened while we
* slept.
*/
if (got_SIGHUP)
{
got_SIGHUP = false;
ProcessConfigFile(PGC_SIGHUP);
}
/*
* (7) process the command. But ignore it if we're skipping till
* Sync.
*/
if (ignore_till_sync && firstchar != EOF)
continue;
switch (firstchar)
{
case 'Q': /* simple query */
{
const char *query_string;
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
query_string = pq_getmsgstring(&input_message);
pq_getmsgend(&input_message);
if (am_walsender)
exec_replication_command(query_string);
else
exec_simple_query(query_string);
send_ready_for_query = true;
}
break;
case 'P': /* parse */
{
const char *stmt_name;
const char *query_string;
int numParams;
Oid *paramTypes = NULL;
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
stmt_name = pq_getmsgstring(&input_message);
query_string = pq_getmsgstring(&input_message);
numParams = pq_getmsgint(&input_message, 2);
if (numParams > 0)
{
int i;
paramTypes = (Oid *) palloc(numParams * sizeof(Oid));
for (i = 0; i < numParams; i++)
paramTypes[i] = pq_getmsgint(&input_message, 4);
}
pq_getmsgend(&input_message);
exec_parse_message(query_string, stmt_name,
paramTypes, numParams);
}
break;
case 'B': /* bind */
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
/*
* this message is complex enough that it seems best to put
* the field extraction out-of-line
*/
exec_bind_message(&input_message);
break;
case 'E': /* execute */
{
const char *portal_name;
int max_rows;
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
portal_name = pq_getmsgstring(&input_message);
max_rows = pq_getmsgint(&input_message, 4);
pq_getmsgend(&input_message);
exec_execute_message(portal_name, max_rows);
}
break;
case 'F': /* fastpath function call */
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() */
SetCurrentStatementStartTimestamp();
/* Report query to various monitoring facilities. */
pgstat_report_activity(STATE_FASTPATH, NULL);
set_ps_display("<FASTPATH>", false);
/* start an xact for this function invocation */
start_xact_command();
/*
* Note: we may at this point be inside an aborted
* transaction. We can't throw error for that until we've
* finished reading the function-call message, so
* HandleFunctionRequest() must check for it after doing so.
* Be careful not to do anything that assumes we're inside a
* valid transaction here.
*/
/* switch back to message context */
MemoryContextSwitchTo(MessageContext);
if (HandleFunctionRequest(&input_message) == EOF)
{
/* lost frontend connection during F message input */
/*
* Reset whereToSendOutput to prevent ereport from
* attempting to send any more messages to client.
*/
if (whereToSendOutput == DestRemote)
whereToSendOutput = DestNone;
proc_exit(0);
}
/* commit the function-invocation transaction */
finish_xact_command();
send_ready_for_query = true;
break;
case 'C': /* close */
{
int close_type;
const char *close_target;
forbidden_in_wal_sender(firstchar);
close_type = pq_getmsgbyte(&input_message);
close_target = pq_getmsgstring(&input_message);
pq_getmsgend(&input_message);
switch (close_type)
{
case 'S':
if (close_target[0] != '\0')
DropPreparedStatement(close_target, false);
else
{
/* special-case the unnamed statement */
drop_unnamed_stmt();
}
break;
case 'P':
{
Portal portal;
portal = GetPortalByName(close_target);
if (PortalIsValid(portal))
PortalDrop(portal, false);
}
break;
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid CLOSE message subtype %d",
close_type)));
break;
}
if (whereToSendOutput == DestRemote)
pq_putemptymessage('3'); /* CloseComplete */
}
break;
case 'D': /* describe */
{
int describe_type;
const char *describe_target;
forbidden_in_wal_sender(firstchar);
/* Set statement_timestamp() (needed for xact) */
SetCurrentStatementStartTimestamp();
describe_type = pq_getmsgbyte(&input_message);
describe_target = pq_getmsgstring(&input_message);
pq_getmsgend(&input_message);
switch (describe_type)
{
case 'S':
exec_describe_statement_message(describe_target);
break;
case 'P':
exec_describe_portal_message(describe_target);
break;
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid DESCRIBE message subtype %d",
describe_type)));
break;
}
}
break;
case 'H': /* flush */
pq_getmsgend(&input_message);
if (whereToSendOutput == DestRemote)
pq_flush();
break;
case 'S': /* sync */
pq_getmsgend(&input_message);
finish_xact_command();
send_ready_for_query = true;
break;
/*
* 'X' means that the frontend is closing down the socket. EOF
* means unexpected loss of frontend connection. Either way,
* perform normal shutdown.
*/
case 'X':
case EOF:
/*
* Reset whereToSendOutput to prevent ereport from attempting
* to send any more messages to client.
*/
if (whereToSendOutput == DestRemote)
whereToSendOutput = DestNone;
/*
* NOTE: if you are tempted to add more code here, DON'T!
* Whatever you had in mind to do should be set up as an
* on_proc_exit or on_shmem_exit callback, instead. Otherwise
* it will fail to be called during other backend-shutdown
* scenarios.
*/
proc_exit(0);
case 'd': /* copy data */
case 'c': /* copy done */
case 'f': /* copy fail */
/*
* Accept but ignore these messages, per protocol spec; we
* probably got here because a COPY failed, and the frontend
* is still sending data.
*/
break;
default:
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid frontend message type %d",
firstchar)));
}
} /* end of input-reading loop */
}
/*
* Throw an error if we're a WAL sender process.
*
 * This is used to forbid anything other than simple query protocol messages
 * in a WAL sender process.  'firstchar' specifies what kind of forbidden
* message was received, and is used to construct the error message.
*/
static void
forbidden_in_wal_sender(char firstchar)
{
if (am_walsender)
{
if (firstchar == 'F')
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("fastpath function calls not supported in a replication connection")));
else
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("extended query protocol not supported in a replication connection")));
}
}
/*
* Obtain platform stack depth limit (in bytes)
*
* Return -1 if unknown
*/
long
get_stack_depth_rlimit(void)
{
#if defined(HAVE_GETRLIMIT) && defined(RLIMIT_STACK)
static long val = 0;
/* This won't change after process launch, so check just once */
if (val == 0)
{
struct rlimit rlim;
if (getrlimit(RLIMIT_STACK, &rlim) < 0)
val = -1;
else if (rlim.rlim_cur == RLIM_INFINITY)
val = LONG_MAX;
/* rlim_cur is probably of an unsigned type, so check for overflow */
else if (rlim.rlim_cur >= LONG_MAX)
val = LONG_MAX;
else
val = rlim.rlim_cur;
}
return val;
#else /* no getrlimit */
#if defined(WIN32) || defined(__CYGWIN__)
/* On Windows we set the backend stack size in src/backend/Makefile */
return WIN32_STACK_RLIMIT;
#else /* not windows ... give up */
return -1;
#endif
#endif
}
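/*
 * Illustrative usage sketch only (hypothetical caller and variable names,
 * not code from this file): a caller that wants to cap a byte budget to the
 * platform stack limit, keeping some safety margin, might do
 *
 *		long	stack_limit = get_stack_depth_rlimit();
 *
 *		if (stack_limit > 0 && my_budget > stack_limit - 512 * 1024)
 *			my_budget = stack_limit - 512 * 1024;
 *
 * The -1 "unknown" result is excluded by the stack_limit > 0 test.
 */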
static struct rusage Save_r;
static struct timeval Save_t;
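/*
 * ResetUsage/ShowUsage: ResetUsage() saves a getrusage() and gettimeofday()
 * snapshot in Save_r/Save_t; a later ShowUsage() call logs the resource
 * usage accumulated since that snapshot.
 */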
void
ResetUsage(void)
{
getrusage(RUSAGE_SELF, &Save_r);
gettimeofday(&Save_t, NULL);
}
void
ShowUsage(const char *title)
{
StringInfoData str;
struct timeval user,
sys;
struct timeval elapse_t;
struct rusage r;
getrusage(RUSAGE_SELF, &r);
gettimeofday(&elapse_t, NULL);
memcpy((char *) &user, (char *) &r.ru_utime, sizeof(user));
memcpy((char *) &sys, (char *) &r.ru_stime, sizeof(sys));
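	/*
	 * Normalize before the field-by-field subtractions below: if the
	 * microsecond part would go negative, borrow one second first; e.g.
	 * 5 s 100 us minus 4 s 900000 us is computed as 4 s 1000100 us minus
	 * 4 s 900000 us = 0.100100 sec.
	 */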
if (elapse_t.tv_usec < Save_t.tv_usec)
{
elapse_t.tv_sec--;
elapse_t.tv_usec += 1000000;
}
if (r.ru_utime.tv_usec < Save_r.ru_utime.tv_usec)
{
r.ru_utime.tv_sec--;
r.ru_utime.tv_usec += 1000000;
}
if (r.ru_stime.tv_usec < Save_r.ru_stime.tv_usec)
{
r.ru_stime.tv_sec--;
r.ru_stime.tv_usec += 1000000;
}
/*
	 * The only stats we don't show here are for memory usage -- I can't
	 * figure out how to interpret the relevant fields in the rusage struct,
	 * and they change names across O/S platforms, anyway.  If you can figure
	 * out what the entries mean, you can somehow extract resident set size,
	 * shared text size, and unshared data and stack sizes.
	 */
initStringInfo(&str);
appendStringInfoString(&str, "! system usage stats:\n");
appendStringInfo(&str,
"!\t%ld.%06ld elapsed %ld.%06ld user %ld.%06ld system sec\n",
(long) (elapse_t.tv_sec - Save_t.tv_sec),
(long) (elapse_t.tv_usec - Save_t.tv_usec),
(long) (r.ru_utime.tv_sec - Save_r.ru_utime.tv_sec),
(long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
(long) (r.ru_stime.tv_sec - Save_r.ru_stime.tv_sec),
(long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec));
appendStringInfo(&str,
"!\t[%ld.%06ld user %ld.%06ld sys total]\n",
(long) user.tv_sec,
(long) user.tv_usec,
(long) sys.tv_sec,
(long) sys.tv_usec);
#if defined(HAVE_GETRUSAGE)
appendStringInfo(&str,
"!\t%ld/%ld [%ld/%ld] filesystem blocks in/out\n",
r.ru_inblock - Save_r.ru_inblock,
/* they only drink coffee at dec */
r.ru_oublock - Save_r.ru_oublock,
r.ru_inblock, r.ru_oublock);
appendStringInfo(&str,
"!\t%ld/%ld [%ld/%ld] page faults/reclaims, %ld [%ld] swaps\n",
r.ru_majflt - Save_r.ru_majflt,
r.ru_minflt - Save_r.ru_minflt,
r.ru_majflt, r.ru_minflt,
r.ru_nswap - Save_r.ru_nswap,
r.ru_nswap);
appendStringInfo(&str,
"!\t%ld [%ld] signals rcvd, %ld/%ld [%ld/%ld] messages rcvd/sent\n",
r.ru_nsignals - Save_r.ru_nsignals,
r.ru_nsignals,
r.ru_msgrcv - Save_r.ru_msgrcv,
r.ru_msgsnd - Save_r.ru_msgsnd,
r.ru_msgrcv, r.ru_msgsnd);
appendStringInfo(&str,
"!\t%ld/%ld [%ld/%ld] voluntary/involuntary context switches\n",
r.ru_nvcsw - Save_r.ru_nvcsw,
r.ru_nivcsw - Save_r.ru_nivcsw,
r.ru_nvcsw, r.ru_nivcsw);
#endif /* HAVE_GETRUSAGE */
/* remove trailing newline */
if (str.data[str.len - 1] == '\n')
str.data[--str.len] = '\0';
ereport(LOG,
(errmsg_internal("%s", title),
errdetail_internal("%s", str.data)));
pfree(str.data);
}
/*
* on_proc_exit handler to log end of session
*/
static void
log_disconnections(int code, Datum arg)
{
Port *port = MyProcPort;
long secs;
int usecs;
int msecs;
int hours,
minutes,
seconds;
TimestampDifference(port->SessionStartTime,
GetCurrentTimestamp(),
&secs, &usecs);
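	/*
	 * Break the elapsed session time into hours:minutes:seconds.milliseconds;
	 * e.g. secs = 3725, usecs = 42000 comes out as 1:02:05.042.
	 */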
msecs = usecs / 1000;
hours = secs / SECS_PER_HOUR;
secs %= SECS_PER_HOUR;
minutes = secs / SECS_PER_MINUTE;
seconds = secs % SECS_PER_MINUTE;
ereport(LOG,
(errmsg("disconnection: session time: %d:%02d:%02d.%03d "
"user=%s database=%s host=%s%s%s",
hours, minutes, seconds, msecs,
port->user_name, port->database_name, port->remote_host,
port->remote_port[0] ? " port=" : "", port->remote_port)));
}