1996-07-09 06:22:35 +00:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-13 23:22:53 +00:00
|
|
|
* postgres.c
|
1997-09-07 05:04:48 +00:00
|
|
|
* POSTGRES C Backend Interface
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
2017-01-03 13:48:53 -05:00
|
|
|
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
|
2000-01-26 05:58:53 +00:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2010-09-20 22:08:53 +02:00
|
|
|
* src/backend/tcop/postgres.c
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
|
|
|
* NOTES
|
1997-09-07 05:04:48 +00:00
|
|
|
* this is the "main" module of the postgres backend and
|
|
|
|
* hence the main module of the "traffic cop".
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
1996-11-08 06:02:30 +00:00
|
|
|
|
2000-06-29 07:35:57 +00:00
|
|
|
#include "postgres.h"
|
|
|
|
|
2010-11-06 16:50:18 -04:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <limits.h>
|
|
|
|
#include <signal.h>
|
1996-11-08 06:02:30 +00:00
|
|
|
#include <unistd.h>
|
1999-10-23 03:13:33 +00:00
|
|
|
#include <sys/socket.h>
|
2006-10-07 19:25:29 +00:00
|
|
|
#ifdef HAVE_SYS_SELECT_H
|
1996-07-09 06:22:35 +00:00
|
|
|
#include <sys/select.h>
|
2000-11-04 12:43:24 +00:00
|
|
|
#endif
|
2006-10-07 19:25:29 +00:00
|
|
|
#ifdef HAVE_SYS_RESOURCE_H
|
2006-10-08 17:45:50 +00:00
|
|
|
#include <sys/time.h>
|
2006-10-07 19:25:29 +00:00
|
|
|
#include <sys/resource.h>
|
|
|
|
#endif
|
1996-07-09 06:22:35 +00:00
|
|
|
|
2006-10-07 19:25:29 +00:00
|
|
|
#ifndef HAVE_GETRUSAGE
|
|
|
|
#include "rusagestub.h"
|
|
|
|
#endif
|
|
|
|
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 15:02:14 -04:00
|
|
|
#include "access/parallel.h"
|
2003-05-05 00:44:56 +00:00
|
|
|
#include "access/printtup.h"
|
2006-07-13 16:49:20 +00:00
|
|
|
#include "access/xact.h"
|
2003-05-05 00:44:56 +00:00
|
|
|
#include "catalog/pg_type.h"
|
1996-07-09 06:22:35 +00:00
|
|
|
#include "commands/async.h"
|
2003-05-05 00:44:56 +00:00
|
|
|
#include "commands/prepare.h"
|
1998-01-13 04:05:12 +00:00
|
|
|
#include "libpq/libpq.h"
|
1999-04-25 03:19:27 +00:00
|
|
|
#include "libpq/pqformat.h"
|
1998-01-13 04:05:12 +00:00
|
|
|
#include "libpq/pqsignal.h"
|
1999-07-16 05:00:38 +00:00
|
|
|
#include "miscadmin.h"
|
1998-01-13 04:05:12 +00:00
|
|
|
#include "nodes/print.h"
|
1996-07-09 06:22:35 +00:00
|
|
|
#include "optimizer/planner.h"
|
2008-08-01 13:16:09 +00:00
|
|
|
#include "pgstat.h"
|
|
|
|
#include "pg_trace.h"
|
2000-10-07 00:58:23 +00:00
|
|
|
#include "parser/analyze.h"
|
1997-11-25 22:07:18 +00:00
|
|
|
#include "parser/parser.h"
|
2014-02-15 14:31:30 -05:00
|
|
|
#include "pg_getopt.h"
|
2007-06-29 17:07:39 +00:00
|
|
|
#include "postmaster/autovacuum.h"
|
2009-08-29 19:26:52 +00:00
|
|
|
#include "postmaster/postmaster.h"
|
2017-06-08 15:00:53 -07:00
|
|
|
#include "replication/logicallauncher.h"
|
2017-06-02 14:46:00 -04:00
|
|
|
#include "replication/logicalworker.h"
|
Introduce logical decoding.
This feature, building on previous commits, allows the write-ahead log
stream to be decoded into a series of logical changes; that is,
inserts, updates, and deletes and the transactions which contain them.
It is capable of handling decoding even across changes to the schema
of the affected tables. The output format is controlled by a
so-called "output plugin"; an example is included. To make use of
this in a real replication system, the output plugin will need to be
modified to produce output in the format appropriate to that system,
and to perform filtering.
Currently, information can be extracted from the logical decoding
system only via SQL; future commits will add the ability to stream
changes via walsender.
Andres Freund, with review and other contributions from many other
people, including Álvaro Herrera, Abhijit Menon-Sen, Peter Geoghegan,
Kevin Grittner, Robert Haas, Heikki Linnakangas, Fujii Masao, Abhijit
Menon-Sen, Michael Paquier, Simon Riggs, Craig Ringer, and Steve
Singer.
2014-03-03 16:32:18 -05:00
|
|
|
#include "replication/slot.h"
|
2010-01-15 09:19:10 +00:00
|
|
|
#include "replication/walsender.h"
|
1999-07-16 03:14:30 +00:00
|
|
|
#include "rewrite/rewriteHandler.h"
|
2008-05-12 00:00:54 +00:00
|
|
|
#include "storage/bufmgr.h"
|
2002-05-05 00:03:29 +00:00
|
|
|
#include "storage/ipc.h"
|
|
|
|
#include "storage/proc.h"
|
2009-07-31 20:26:23 +00:00
|
|
|
#include "storage/procsignal.h"
|
2004-05-23 03:50:45 +00:00
|
|
|
#include "storage/sinval.h"
|
1998-01-13 04:05:12 +00:00
|
|
|
#include "tcop/fastpath.h"
|
|
|
|
#include "tcop/pquery.h"
|
1999-07-16 03:14:30 +00:00
|
|
|
#include "tcop/tcopprot.h"
|
1998-01-13 04:05:12 +00:00
|
|
|
#include "tcop/utility.h"
|
2003-05-05 00:44:56 +00:00
|
|
|
#include "utils/lsyscache.h"
|
2000-07-17 03:05:41 +00:00
|
|
|
#include "utils/memutils.h"
|
1998-08-25 21:34:10 +00:00
|
|
|
#include "utils/ps_status.h"
|
2008-03-26 18:48:59 +00:00
|
|
|
#include "utils/snapmgr.h"
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-16 18:43:21 -04:00
|
|
|
#include "utils/timeout.h"
|
2011-09-09 13:23:41 -04:00
|
|
|
#include "utils/timestamp.h"
|
1999-07-16 05:00:38 +00:00
|
|
|
#include "mb/pg_wchar.h"
|
1998-06-16 07:29:54 +00:00
|
|
|
|
1998-08-25 21:34:10 +00:00
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
/* ----------------
|
1997-09-07 05:04:48 +00:00
|
|
|
* global variables
|
1996-07-09 06:22:35 +00:00
|
|
|
* ----------------
|
|
|
|
*/
|
2009-01-07 19:35:43 +00:00
|
|
|
const char *debug_query_string; /* client-supplied query string */
|
2001-04-14 19:11:45 +00:00
|
|
|
|
2001-09-08 01:10:21 +00:00
|
|
|
/* Note: whereToSendOutput is initialized for the bootstrap/standalone case */
|
2005-11-03 17:11:40 +00:00
|
|
|
CommandDest whereToSendOutput = DestDebug;
|
1996-07-09 06:22:35 +00:00
|
|
|
|
2004-02-17 03:54:57 +00:00
|
|
|
/* flag for logging end of session */
|
2004-08-29 05:07:03 +00:00
|
|
|
bool Log_disconnections = false;
|
2004-02-17 03:54:57 +00:00
|
|
|
|
2008-03-10 12:55:13 +00:00
|
|
|
int log_statement = LOGSTMT_NONE;
|
> >>1. change the type of "log_statement" option from boolean to string,
> >>with allowed values of "all, mod, ddl, none" with default "none".
OK, here is a patch that implements #1. Here is sample output:
test=> set client_min_messages = 'log';
SET
test=> set log_statement = 'mod';
SET
test=> select 1;
?column?
----------
1
(1 row)
test=> update test set x=1;
LOG: statement: update test set x=1;
ERROR: relation "test" does not exist
test=> update test set x=1;
LOG: statement: update test set x=1;
ERROR: relation "test" does not exist
test=> copy test from '/tmp/x';
LOG: statement: copy test from '/tmp/x';
ERROR: relation "test" does not exist
test=> copy test to '/tmp/x';
ERROR: relation "test" does not exist
test=> prepare xx as select 1;
PREPARE
test=> prepare xx as update x set y=1;
LOG: statement: prepare xx as update x set y=1;
ERROR: relation "x" does not exist
test=> explain analyze select 1;;
QUERY PLAN
------------------------------------------------------------------------------------
Result (cost=0.00..0.01 rows=1 width=0) (actual time=0.006..0.007 rows=1 loops=1)
Total runtime: 0.046 ms
(2 rows)
test=> explain analyze update test set x=1;
LOG: statement: explain analyze update test set x=1;
ERROR: relation "test" does not exist
test=> explain update test set x=1;
ERROR: relation "test" does not exist
It checks PREPARE and EXECUTE ANALYZE too. The log_statement values are
'none', 'mod', 'ddl', and 'all'. For 'all', it prints before the query
is parsed, and for ddl/mod, it does it right after parsing using the
node tag (or command tag for CREATE/ALTER/DROP), so any non-parse errors
will print after the log line.
2004-04-07 05:05:50 +00:00
|
|
|
|
2004-03-24 22:40:29 +00:00
|
|
|
/* GUC variable for maximum stack depth (measured in kilobytes) */
|
2006-10-07 19:25:29 +00:00
|
|
|
int max_stack_depth = 100;
|
2004-03-24 22:40:29 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
/* wait N seconds to allow attach from a debugger */
|
|
|
|
int PostAuthDelay = 0;
|
|
|
|
|
|
|
|
|
2004-03-24 22:40:29 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/* ----------------
|
|
|
|
* private variables
|
|
|
|
* ----------------
|
|
|
|
*/
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2004-03-24 22:40:29 +00:00
|
|
|
/* max_stack_depth converted to bytes for speed of checking */
|
2006-10-07 19:25:29 +00:00
|
|
|
static long max_stack_depth_bytes = 100 * 1024L;
|
2004-03-24 22:40:29 +00:00
|
|
|
|
2006-02-17 03:29:02 +00:00
|
|
|
/*
|
Do stack-depth checking in all postmaster children.
We used to only initialize the stack base pointer when starting up a regular
backend, not in other processes. In particular, autovacuum workers can run
arbitrary user code, and without stack-depth checking, infinite recursion
in e.g. an index expression will bring down the whole cluster.
The comment about PL/Java using set_stack_base() is not yet true. As the
code stands, PL/java still modifies the stack_base_ptr variable directly.
However, it's been discussed in the PL/Java mailing list that it should be
changed to use the function, because PL/Java is currently oblivious to the
register stack used on Itanium. There's another issue with PL/Java, namely
that the stack base pointer it sets is not really the base of the stack, it
could be something close to the bottom of the stack. That's a separate issue
that might need some further changes to this code, but that's a different
story.
Backpatch to all supported releases.
2012-04-08 18:28:12 +03:00
|
|
|
* Stack base pointer -- initialized by PostmasterMain and inherited by
|
|
|
|
* subprocesses. This is not static because old versions of PL/Java modify
|
|
|
|
* it directly. Newer versions use set_stack_base(), but we want to stay
|
|
|
|
* binary-compatible for the time being.
|
2006-02-17 03:29:02 +00:00
|
|
|
*/
|
2005-10-15 02:49:52 +00:00
|
|
|
char *stack_base_ptr = NULL;
|
2004-03-24 22:40:29 +00:00
|
|
|
|
2010-11-06 19:36:29 -04:00
|
|
|
/*
|
|
|
|
* On IA64 we also have to remember the register stack base.
|
|
|
|
*/
|
|
|
|
#if defined(__ia64__) || defined(__ia64)
|
|
|
|
char *register_stack_base_ptr = NULL;
|
|
|
|
#endif
|
2004-03-24 22:40:29 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
|
|
|
* Flag to keep track of whether we have started a transaction.
|
|
|
|
* For extended query protocol this has to be remembered across messages.
|
|
|
|
*/
|
|
|
|
static bool xact_started = false;
|
|
|
|
|
2005-06-02 21:03:25 +00:00
|
|
|
/*
|
|
|
|
* Flag to indicate that we are doing the outer loop's read-from-client,
|
|
|
|
* as opposed to any random read from client that might happen within
|
|
|
|
* commands like COPY FROM STDIN.
|
|
|
|
*/
|
|
|
|
static bool DoingCommandRead = false;
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
|
|
|
* Flags to implement skip-till-Sync-after-error behavior for messages of
|
|
|
|
* the extended query protocol.
|
|
|
|
*/
|
|
|
|
static bool doing_extended_query_message = false;
|
|
|
|
static bool ignore_till_sync = false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If an unnamed prepared statement exists, it's stored here.
|
|
|
|
* We keep it separate from the hashtable kept by commands/prepare.c
|
|
|
|
* in order to reduce overhead for short-lived queries.
|
|
|
|
*/
|
2007-03-13 00:33:44 +00:00
|
|
|
static CachedPlanSource *unnamed_stmt_psrc = NULL;
|
2007-11-15 21:14:46 +00:00
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
/* assorted command-line switches */
|
2010-02-26 02:01:40 +00:00
|
|
|
static const char *userDoption = NULL; /* -D switch */
|
|
|
|
static bool EchoQuery = false; /* -E switch */
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
static bool UseSemiNewlineNewline = false; /* -j switch */
|
1996-07-09 06:22:35 +00:00
|
|
|
|
2011-06-29 09:26:14 +03:00
|
|
|
/* whether or not, and why, we were canceled by conflict with recovery */
|
2010-01-16 10:05:59 +00:00
|
|
|
static bool RecoveryConflictPending = false;
|
2010-05-12 19:45:02 +00:00
|
|
|
static bool RecoveryConflictRetryable = true;
|
2010-02-26 02:01:40 +00:00
|
|
|
static ProcSignalReason RecoveryConflictReason;
|
1996-07-09 06:22:35 +00:00
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
1997-09-07 05:04:48 +00:00
|
|
|
* decls for routines only used in this file
|
1996-07-09 06:22:35 +00:00
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
2000-04-12 17:17:23 +00:00
|
|
|
static int InteractiveBackend(StringInfo inBuf);
|
2007-07-09 01:15:14 +00:00
|
|
|
static int interactive_getc(void);
|
2000-04-12 17:17:23 +00:00
|
|
|
static int SocketBackend(StringInfo inBuf);
|
|
|
|
static int ReadCommand(StringInfo inBuf);
|
2012-10-05 17:13:07 +03:00
|
|
|
static void forbidden_in_wal_sender(char firstchar);
|
2007-06-23 22:12:52 +00:00
|
|
|
static List *pg_rewrite_query(Query *query);
|
2007-02-20 17:32:18 +00:00
|
|
|
static bool check_log_statement(List *stmt_list);
|
2006-09-07 22:52:01 +00:00
|
|
|
static int errdetail_execute(List *raw_parsetree_list);
|
|
|
|
static int errdetail_params(ParamListInfo params);
|
2010-02-26 02:01:40 +00:00
|
|
|
static int errdetail_abort(void);
|
|
|
|
static int errdetail_recovery_conflict(void);
|
2000-10-07 00:58:23 +00:00
|
|
|
static void start_xact_command(void);
|
2003-05-14 03:26:03 +00:00
|
|
|
static void finish_xact_command(void);
|
2005-11-10 00:31:34 +00:00
|
|
|
static bool IsTransactionExitStmt(Node *parsetree);
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
static bool IsTransactionExitStmtList(List *pstmts);
|
|
|
|
static bool IsTransactionStmtList(List *pstmts);
|
2007-03-13 00:33:44 +00:00
|
|
|
static void drop_unnamed_stmt(void);
|
2004-02-17 04:09:26 +00:00
|
|
|
static void log_disconnections(int code, Datum arg);
|
1996-07-09 06:22:35 +00:00
|
|
|
|
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
1997-09-07 05:04:48 +00:00
|
|
|
* routines to obtain user input
|
1996-07-09 06:22:35 +00:00
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* ----------------
|
1997-09-07 05:04:48 +00:00
|
|
|
* InteractiveBackend() is called for user interactive connections
|
2003-04-19 00:02:30 +00:00
|
|
|
*
|
|
|
|
* the string entered by the user is placed in its parameter inBuf,
|
|
|
|
* and we act like a Q message was received.
|
1999-07-22 02:40:07 +00:00
|
|
|
*
|
2000-04-12 17:17:23 +00:00
|
|
|
* EOF is returned if end-of-file input is seen; time to shut down.
|
1996-07-09 06:22:35 +00:00
|
|
|
* ----------------
|
|
|
|
*/
|
|
|
|
|
1999-07-22 02:40:07 +00:00
|
|
|
static int
|
1999-08-31 04:26:40 +00:00
|
|
|
InteractiveBackend(StringInfo inBuf)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
int c; /* character read from getc() */
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
|
|
|
* display a prompt and obtain input from the user
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
1999-05-22 02:55:58 +00:00
|
|
|
printf("backend> ");
|
1998-10-02 01:14:14 +00:00
|
|
|
fflush(stdout);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2007-03-03 19:32:55 +00:00
|
|
|
resetStringInfo(inBuf);
|
1999-08-31 04:26:40 +00:00
|
|
|
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
/*
|
|
|
|
* Read characters until EOF or the appropriate delimiter is seen.
|
|
|
|
*/
|
|
|
|
while ((c = interactive_getc()) != EOF)
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
if (c == '\n')
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
if (UseSemiNewlineNewline)
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
/*
|
|
|
|
* In -j mode, semicolon followed by two newlines ends the
|
|
|
|
* command; otherwise treat newline as regular character.
|
|
|
|
*/
|
|
|
|
if (inBuf->len > 1 &&
|
|
|
|
inBuf->data[inBuf->len - 1] == '\n' &&
|
|
|
|
inBuf->data[inBuf->len - 2] == ';')
|
|
|
|
{
|
|
|
|
/* might as well drop the second newline */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* In plain mode, newline ends the command unless preceded by
|
|
|
|
* backslash.
|
|
|
|
*/
|
|
|
|
if (inBuf->len > 0 &&
|
|
|
|
inBuf->data[inBuf->len - 1] == '\\')
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2007-07-09 01:15:14 +00:00
|
|
|
/* discard backslash from inBuf */
|
|
|
|
inBuf->data[--inBuf->len] = '\0';
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
/* discard newline too */
|
2007-07-09 01:15:14 +00:00
|
|
|
continue;
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
else
|
2007-07-09 01:15:14 +00:00
|
|
|
{
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
/* keep the newline character, but end the command */
|
2007-07-09 01:15:14 +00:00
|
|
|
appendStringInfoChar(inBuf, '\n');
|
|
|
|
break;
|
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
/* Not newline, or newline treated as regular character */
|
|
|
|
appendStringInfoChar(inBuf, (char) c);
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
/* No input before EOF signal means time to quit. */
|
|
|
|
if (c == EOF && inBuf->len == 0)
|
2007-07-09 01:15:14 +00:00
|
|
|
return EOF;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* otherwise we have a user query so process it.
|
|
|
|
*/
|
|
|
|
|
2003-04-19 00:02:30 +00:00
|
|
|
/* Add '\0' to make it look the same as message case. */
|
|
|
|
appendStringInfoChar(inBuf, (char) '\0');
|
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
|
|
|
* if the query echo flag was given, print the query..
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
|
|
|
if (EchoQuery)
|
2002-09-01 23:26:06 +00:00
|
|
|
printf("statement: %s\n", inBuf->data);
|
1998-10-02 01:14:14 +00:00
|
|
|
fflush(stdout);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
1998-09-01 03:29:17 +00:00
|
|
|
return 'Q';
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2007-07-09 01:15:14 +00:00
|
|
|
/*
|
|
|
|
* interactive_getc -- collect one character from stdin
|
|
|
|
*
|
|
|
|
* Even though we are not reading from a "client" process, we still want to
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
* respond to signals, particularly SIGTERM/SIGQUIT.
|
2007-07-09 01:15:14 +00:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
interactive_getc(void)
|
|
|
|
{
|
|
|
|
int c;
|
|
|
|
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
/*
|
|
|
|
* This will not process catchup interrupts or notifications while
|
|
|
|
* reading. But those can't really be relevant for a standalone backend
|
|
|
|
* anyway. To properly handle SIGTERM there's a hack in die() that
|
|
|
|
* directly processes interrupts at this stage...
|
|
|
|
*/
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
|
2007-07-09 01:15:14 +00:00
|
|
|
c = getc(stdin);
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
|
2015-02-03 22:45:45 +01:00
|
|
|
ProcessClientReadInterrupt(true);
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
|
2007-07-09 01:15:14 +00:00
|
|
|
return c;
|
|
|
|
}
|
|
|
|
|
/* ----------------
 *	SocketBackend()		Is called for frontend-backend connections
 *
 *	Returns the message type code, and loads message body data into inBuf.
 *
 *	EOF is returned if the connection is lost.
 * ----------------
 */
static int
SocketBackend(StringInfo inBuf)
{
	int			qtype;

	/*
	 * Get message type code from the frontend.
	 *
	 * Hold off query-cancel interrupts while reading: an ERROR thrown in the
	 * middle of a message read would leave us out of sync with the client
	 * (see the pq_startmsgread/pq_endmsgread machinery).  Die interrupts are
	 * still allowed.
	 */
	HOLD_CANCEL_INTERRUPTS();
	pq_startmsgread();
	qtype = pq_getbyte();

	if (qtype == EOF)			/* frontend disconnected */
	{
		if (IsTransactionState())
			ereport(COMMERROR,
					(errcode(ERRCODE_CONNECTION_FAILURE),
					 errmsg("unexpected EOF on client connection with an open transaction")));
		else
		{
			/*
			 * Can't send DEBUG log messages to client at this point. Since
			 * we're disconnecting right away, we don't need to restore
			 * whereToSendOutput.
			 */
			whereToSendOutput = DestNone;
			ereport(DEBUG1,
					(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
					 errmsg("unexpected EOF on client connection")));
		}
		return qtype;
	}

	/*
	 * Validate message type code before trying to read body; if we have lost
	 * sync, better to say "command unknown" than to run out of memory because
	 * we used garbage as a length word.
	 *
	 * This also gives us a place to set the doing_extended_query_message flag
	 * as soon as possible.
	 */
	switch (qtype)
	{
		case 'Q':				/* simple query */
			doing_extended_query_message = false;
			if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
			{
				/* old style without length word; convert */
				if (pq_getstring(inBuf))
				{
					if (IsTransactionState())
						ereport(COMMERROR,
								(errcode(ERRCODE_CONNECTION_FAILURE),
								 errmsg("unexpected EOF on client connection with an open transaction")));
					else
					{
						/*
						 * Can't send DEBUG log messages to client at this
						 * point. Since we're disconnecting right away, we
						 * don't need to restore whereToSendOutput.
						 */
						whereToSendOutput = DestNone;
						ereport(DEBUG1,
								(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
								 errmsg("unexpected EOF on client connection")));
					}
					return EOF;
				}
			}
			break;

		case 'F':				/* fastpath function call */
			doing_extended_query_message = false;
			if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
			{
				if (GetOldFunctionMessage(inBuf))
				{
					if (IsTransactionState())
						ereport(COMMERROR,
								(errcode(ERRCODE_CONNECTION_FAILURE),
								 errmsg("unexpected EOF on client connection with an open transaction")));
					else
					{
						/*
						 * Can't send DEBUG log messages to client at this
						 * point. Since we're disconnecting right away, we
						 * don't need to restore whereToSendOutput.
						 */
						whereToSendOutput = DestNone;
						ereport(DEBUG1,
								(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
								 errmsg("unexpected EOF on client connection")));
					}
					return EOF;
				}
			}
			break;

		case 'X':				/* terminate */
			doing_extended_query_message = false;
			ignore_till_sync = false;
			break;

		case 'B':				/* bind */
		case 'C':				/* close */
		case 'D':				/* describe */
		case 'E':				/* execute */
		case 'H':				/* flush */
		case 'P':				/* parse */
			doing_extended_query_message = true;
			/* these are only legal in protocol 3 */
			if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
				ereport(FATAL,
						(errcode(ERRCODE_PROTOCOL_VIOLATION),
						 errmsg("invalid frontend message type %d", qtype)));
			break;

		case 'S':				/* sync */
			/* stop any active skip-till-Sync */
			ignore_till_sync = false;
			/* mark not-extended, so that a new error doesn't begin skip */
			doing_extended_query_message = false;
			/* only legal in protocol 3 */
			if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
				ereport(FATAL,
						(errcode(ERRCODE_PROTOCOL_VIOLATION),
						 errmsg("invalid frontend message type %d", qtype)));
			break;

		case 'd':				/* copy data */
		case 'c':				/* copy done */
		case 'f':				/* copy fail */
			doing_extended_query_message = false;
			/* these are only legal in protocol 3 */
			if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
				ereport(FATAL,
						(errcode(ERRCODE_PROTOCOL_VIOLATION),
						 errmsg("invalid frontend message type %d", qtype)));
			break;

		default:

			/*
			 * Otherwise we got garbage from the frontend.  We treat this as
			 * fatal because we have probably lost message boundary sync, and
			 * there's no good way to recover.
			 */
			ereport(FATAL,
					(errcode(ERRCODE_PROTOCOL_VIOLATION),
					 errmsg("invalid frontend message type %d", qtype)));
			break;
	}

	/*
	 * In protocol version 3, all frontend messages have a length word next
	 * after the type code; we can read the message contents independently of
	 * the type.
	 */
	if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
	{
		if (pq_getmessage(inBuf, 0))
			return EOF;			/* suitable message already logged */
	}
	else
		pq_endmsgread();
	/* message fully read; it's safe to take cancel interrupts again */
	RESUME_CANCEL_INTERRUPTS();

	return qtype;
}
/* ----------------
|
1997-09-07 05:04:48 +00:00
|
|
|
* ReadCommand reads a command from either the frontend or
|
2003-05-02 20:54:36 +00:00
|
|
|
* standard input, places it in inBuf, and returns the
|
|
|
|
* message type code (first byte of the message).
|
|
|
|
* EOF is returned if end of file.
|
1996-07-09 06:22:35 +00:00
|
|
|
* ----------------
|
|
|
|
*/
|
1999-07-22 02:40:07 +00:00
|
|
|
static int
|
1999-08-31 04:26:40 +00:00
|
|
|
ReadCommand(StringInfo inBuf)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2000-04-12 17:17:23 +00:00
|
|
|
int result;
|
1999-08-31 04:26:40 +00:00
|
|
|
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput == DestRemote)
|
1999-08-31 04:26:40 +00:00
|
|
|
result = SocketBackend(inBuf);
|
1997-09-07 05:04:48 +00:00
|
|
|
else
|
1999-08-31 04:26:40 +00:00
|
|
|
result = InteractiveBackend(inBuf);
|
|
|
|
return result;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2005-06-02 21:03:25 +00:00
|
|
|
/*
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
* ProcessClientReadInterrupt() - Process interrupts specific to client reads
|
2005-06-02 21:03:25 +00:00
|
|
|
*
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
* This is called just after low-level reads. That might be after the read
|
|
|
|
* finished successfully, or it was interrupted via interrupt.
|
2013-11-24 13:09:38 -05:00
|
|
|
*
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
* Must preserve errno!
|
2005-06-02 21:03:25 +00:00
|
|
|
*/
|
|
|
|
void
|
2015-02-03 22:45:45 +01:00
|
|
|
ProcessClientReadInterrupt(bool blocked)
|
2005-06-02 21:03:25 +00:00
|
|
|
{
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
int save_errno = errno;
|
|
|
|
|
2005-06-02 21:03:25 +00:00
|
|
|
if (DoingCommandRead)
|
|
|
|
{
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
/* Check for general interrupts that arrived while reading */
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
2005-06-02 21:03:25 +00:00
|
|
|
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
/* Process sinval catchup interrupts that happened while reading */
|
|
|
|
if (catchupInterruptPending)
|
|
|
|
ProcessCatchupInterrupt();
|
2013-11-24 13:09:38 -05:00
|
|
|
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
/* Process sinval catchup interrupts that happened while reading */
|
|
|
|
if (notifyInterruptPending)
|
|
|
|
ProcessNotifyInterrupt();
|
2005-06-02 21:03:25 +00:00
|
|
|
}
|
2015-02-03 22:45:45 +01:00
|
|
|
else if (ProcDiePending && blocked)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We're dying. It's safe (and sane) to handle that now.
|
|
|
|
*/
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
}
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
|
|
|
|
errno = save_errno;
|
2005-06-02 21:03:25 +00:00
|
|
|
}
|
|
|
|
|
2015-02-03 22:45:45 +01:00
|
|
|
/*
|
|
|
|
* ProcessClientWriteInterrupt() - Process interrupts specific to client writes
|
|
|
|
*
|
|
|
|
* This is called just after low-level writes. That might be after the read
|
|
|
|
* finished successfully, or it was interrupted via interrupt. 'blocked' tells
|
|
|
|
* us whether the
|
|
|
|
*
|
|
|
|
* Must preserve errno!
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ProcessClientWriteInterrupt(bool blocked)
|
|
|
|
{
|
|
|
|
int save_errno = errno;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only want to process the interrupt here if socket writes are
|
2015-05-23 21:35:49 -04:00
|
|
|
* blocking to increase the chance to get an error message to the client.
|
|
|
|
* If we're not blocked there'll soon be a CHECK_FOR_INTERRUPTS(). But if
|
|
|
|
* we're blocked we'll never get out of that situation if the client has
|
|
|
|
* died.
|
2015-02-03 22:45:45 +01:00
|
|
|
*/
|
|
|
|
if (ProcDiePending && blocked)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* We're dying. It's safe (and sane) to handle that now. But we don't
|
|
|
|
* want to send the client the error message as that a) would possibly
|
|
|
|
* block again b) would possibly lead to sending an error message to
|
|
|
|
* the client, while we already started to send something else.
|
|
|
|
*/
|
|
|
|
if (whereToSendOutput == DestRemote)
|
|
|
|
whereToSendOutput = DestNone;
|
|
|
|
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
}
|
|
|
|
|
|
|
|
errno = save_errno;
|
|
|
|
}
|
2000-04-04 21:44:40 +00:00
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/*
|
|
|
|
* Do raw parsing (only).
|
|
|
|
*
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
* A list of parsetrees (RawStmt nodes) is returned, since there might be
|
|
|
|
* multiple commands in the given string.
|
2000-10-07 00:58:23 +00:00
|
|
|
*
|
|
|
|
* NOTE: for interactive queries, it is important to keep this routine
|
|
|
|
* separate from the analysis & rewrite stages. Analysis and rewriting
|
|
|
|
* cannot be done in an aborted transaction, since they require access to
|
|
|
|
* database tables. So, we rely on the raw parser to determine whether
|
|
|
|
* we've seen a COMMIT or ABORT command; when we are in abort state, other
|
|
|
|
* commands are not processed any further than the raw parse stage.
|
|
|
|
*/
|
2002-10-14 23:49:20 +00:00
|
|
|
List *
|
2003-04-29 22:13:11 +00:00
|
|
|
pg_parse_query(const char *query_string)
|
2000-10-07 00:58:23 +00:00
|
|
|
{
|
2004-05-26 04:41:50 +00:00
|
|
|
List *raw_parsetree_list;
|
1999-05-09 23:31:47 +00:00
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_PARSE_START(query_string);
|
|
|
|
|
2002-11-15 00:47:22 +00:00
|
|
|
if (log_parser_stats)
|
2000-10-07 00:58:23 +00:00
|
|
|
ResetUsage();
|
|
|
|
|
2003-04-29 22:13:11 +00:00
|
|
|
raw_parsetree_list = raw_parser(query_string);
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2005-05-24 04:18:04 +00:00
|
|
|
if (log_parser_stats)
|
|
|
|
ShowUsage("PARSER STATISTICS");
|
|
|
|
|
2007-02-17 19:33:32 +00:00
|
|
|
#ifdef COPY_PARSE_PLAN_TREES
|
|
|
|
/* Optional debugging check: pass raw parsetrees through copyObject() */
|
|
|
|
{
|
2017-03-09 15:18:59 -05:00
|
|
|
List *new_list = copyObject(raw_parsetree_list);
|
2007-02-17 19:33:32 +00:00
|
|
|
|
|
|
|
/* This checks both copyObject() and the equal() routines... */
|
|
|
|
if (!equal(new_list, raw_parsetree_list))
|
|
|
|
elog(WARNING, "copyObject() failed to produce an equal raw parse tree");
|
|
|
|
else
|
|
|
|
raw_parsetree_list = new_list;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_PARSE_DONE(query_string);
|
|
|
|
|
2005-05-24 04:18:04 +00:00
|
|
|
return raw_parsetree_list;
|
|
|
|
}
|
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/*
|
2003-04-29 22:13:11 +00:00
|
|
|
* Given a raw parsetree (gram.y output), and optionally information about
|
|
|
|
* types of parameter symbols ($n), perform parse analysis and rule rewriting.
|
2000-10-07 00:58:23 +00:00
|
|
|
*
|
|
|
|
* A list of Query nodes is returned, since either the analyzer or the
|
|
|
|
* rewriter might expand one query to several.
|
|
|
|
*
|
|
|
|
* NOTE: for reasons mentioned above, this must be separate from raw parsing.
|
|
|
|
*/
|
2002-10-14 23:49:20 +00:00
|
|
|
List *
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
pg_analyze_and_rewrite(RawStmt *parsetree, const char *query_string,
|
2017-03-31 23:17:18 -05:00
|
|
|
Oid *paramTypes, int numParams,
|
|
|
|
QueryEnvironment *queryEnv)
|
2000-10-07 00:58:23 +00:00
|
|
|
{
|
2007-06-23 22:12:52 +00:00
|
|
|
Query *query;
|
2000-10-07 00:58:23 +00:00
|
|
|
List *querytree_list;
|
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_REWRITE_START(query_string);
|
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
|
|
|
* (1) Perform parse analysis.
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
2002-11-15 00:47:22 +00:00
|
|
|
if (log_parser_stats)
|
1997-09-07 05:04:48 +00:00
|
|
|
ResetUsage();
|
|
|
|
|
2017-03-31 23:17:18 -05:00
|
|
|
query = parse_analyze(parsetree, query_string, paramTypes, numParams,
|
|
|
|
queryEnv);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2002-11-15 00:47:22 +00:00
|
|
|
if (log_parser_stats)
|
2001-11-10 23:51:14 +00:00
|
|
|
ShowUsage("PARSE ANALYSIS STATISTICS");
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
|
|
|
* (2) Rewrite the queries, as necessary
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2007-06-23 22:12:52 +00:00
|
|
|
querytree_list = pg_rewrite_query(query);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string);
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
return querytree_list;
|
|
|
|
}
|
|
|
|
|
2009-11-04 22:26:08 +00:00
|
|
|
/*
|
|
|
|
* Do parse analysis and rewriting. This is the same as pg_analyze_and_rewrite
|
|
|
|
* except that external-parameter resolution is determined by parser callback
|
|
|
|
* hooks instead of a fixed list of parameter datatypes.
|
|
|
|
*/
|
|
|
|
List *
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
pg_analyze_and_rewrite_params(RawStmt *parsetree,
|
2009-11-04 22:26:08 +00:00
|
|
|
const char *query_string,
|
|
|
|
ParserSetupHook parserSetup,
|
2017-03-31 23:17:18 -05:00
|
|
|
void *parserSetupArg,
|
|
|
|
QueryEnvironment *queryEnv)
|
2009-11-04 22:26:08 +00:00
|
|
|
{
|
|
|
|
ParseState *pstate;
|
|
|
|
Query *query;
|
|
|
|
List *querytree_list;
|
|
|
|
|
2010-02-26 02:01:40 +00:00
|
|
|
Assert(query_string != NULL); /* required as of 8.4 */
|
2009-11-04 22:26:08 +00:00
|
|
|
|
|
|
|
TRACE_POSTGRESQL_QUERY_REWRITE_START(query_string);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* (1) Perform parse analysis.
|
|
|
|
*/
|
|
|
|
if (log_parser_stats)
|
|
|
|
ResetUsage();
|
|
|
|
|
|
|
|
pstate = make_parsestate(NULL);
|
|
|
|
pstate->p_sourcetext = query_string;
|
2017-03-31 23:17:18 -05:00
|
|
|
pstate->p_queryEnv = queryEnv;
|
2009-11-04 22:26:08 +00:00
|
|
|
(*parserSetup) (pstate, parserSetupArg);
|
|
|
|
|
Restructure SELECT INTO's parsetree representation into CreateTableAsStmt.
Making this operation look like a utility statement seems generally a good
idea, and particularly so in light of the desire to provide command
triggers for utility statements. The original choice of representing it as
SELECT with an IntoClause appendage had metastasized into rather a lot of
places, unfortunately, so that this patch is a great deal more complicated
than one might at first expect.
In particular, keeping EXPLAIN working for SELECT INTO and CREATE TABLE AS
subcommands required restructuring some EXPLAIN-related APIs. Add-on code
that calls ExplainOnePlan or ExplainOneUtility, or uses
ExplainOneQuery_hook, will need adjustment.
Also, the cases PREPARE ... SELECT INTO and CREATE RULE ... SELECT INTO,
which formerly were accepted though undocumented, are no longer accepted.
The PREPARE case can be replaced with use of CREATE TABLE AS EXECUTE.
The CREATE RULE case doesn't seem to have much real-world use (since the
rule would work only once before failing with "table already exists"),
so we'll not bother with that one.
Both SELECT INTO and CREATE TABLE AS still return a command tag of
"SELECT nnnn". There was some discussion of returning "CREATE TABLE nnnn",
but for the moment backwards compatibility wins the day.
Andres Freund and Tom Lane
2012-03-19 21:37:19 -04:00
|
|
|
query = transformTopLevelStmt(pstate, parsetree);
|
2009-11-04 22:26:08 +00:00
|
|
|
|
2012-03-27 15:14:13 -04:00
|
|
|
if (post_parse_analyze_hook)
|
|
|
|
(*post_parse_analyze_hook) (pstate, query);
|
|
|
|
|
2009-11-04 22:26:08 +00:00
|
|
|
free_parsestate(pstate);
|
|
|
|
|
|
|
|
if (log_parser_stats)
|
|
|
|
ShowUsage("PARSE ANALYSIS STATISTICS");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* (2) Rewrite the queries, as necessary
|
|
|
|
*/
|
|
|
|
querytree_list = pg_rewrite_query(query);
|
|
|
|
|
|
|
|
TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string);
|
|
|
|
|
|
|
|
return querytree_list;
|
|
|
|
}
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2007-06-23 22:12:52 +00:00
|
|
|
* Perform rewriting of a query produced by parse analysis.
|
2005-06-03 23:05:30 +00:00
|
|
|
*
|
2007-06-23 22:12:52 +00:00
|
|
|
* Note: query must just have come from the parser, because we do not do
|
|
|
|
* AcquireRewriteLocks() on it.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2005-06-03 23:05:30 +00:00
|
|
|
static List *
|
2007-06-23 22:12:52 +00:00
|
|
|
pg_rewrite_query(Query *query)
|
2003-05-05 00:44:56 +00:00
|
|
|
{
|
2007-06-23 22:12:52 +00:00
|
|
|
List *querytree_list;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2007-06-23 22:12:52 +00:00
|
|
|
if (Debug_print_parse)
|
2008-08-19 18:30:04 +00:00
|
|
|
elog_node_display(LOG, "parse tree", query,
|
2007-06-23 22:12:52 +00:00
|
|
|
Debug_pretty_print);
|
2000-04-12 17:17:23 +00:00
|
|
|
|
2008-08-19 18:30:04 +00:00
|
|
|
if (log_parser_stats)
|
|
|
|
ResetUsage();
|
|
|
|
|
2007-06-23 22:12:52 +00:00
|
|
|
if (query->commandType == CMD_UTILITY)
|
|
|
|
{
|
|
|
|
/* don't rewrite utilities, just dump 'em into result list */
|
|
|
|
querytree_list = list_make1(query);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* rewrite regular queries */
|
|
|
|
querytree_list = QueryRewrite(query);
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
|
2002-11-15 00:47:22 +00:00
|
|
|
if (log_parser_stats)
|
2001-11-10 23:51:14 +00:00
|
|
|
ShowUsage("REWRITER STATISTICS");
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2000-06-29 07:35:57 +00:00
|
|
|
#ifdef COPY_PARSE_PLAN_TREES
|
2007-06-23 22:12:52 +00:00
|
|
|
/* Optional debugging check: pass querytree output through copyObject() */
|
|
|
|
{
|
|
|
|
List *new_list;
|
2001-03-22 04:01:46 +00:00
|
|
|
|
2017-03-09 15:18:59 -05:00
|
|
|
new_list = copyObject(querytree_list);
|
2007-06-23 22:12:52 +00:00
|
|
|
/* This checks both copyObject() and the equal() routines... */
|
|
|
|
if (!equal(new_list, querytree_list))
|
|
|
|
elog(WARNING, "copyObject() failed to produce equal parse tree");
|
|
|
|
else
|
|
|
|
querytree_list = new_list;
|
|
|
|
}
|
2000-06-29 07:35:57 +00:00
|
|
|
#endif
|
|
|
|
|
2000-05-31 00:28:42 +00:00
|
|
|
if (Debug_print_rewritten)
|
2008-08-19 18:30:04 +00:00
|
|
|
elog_node_display(LOG, "rewritten parse tree", querytree_list,
|
2002-03-24 04:31:09 +00:00
|
|
|
Debug_pretty_print);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2000-04-04 21:44:40 +00:00
|
|
|
return querytree_list;
|
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
|
2007-04-16 01:14:58 +00:00
|
|
|
/*
|
|
|
|
* Generate a plan for a single already-rewritten query.
|
|
|
|
* This is a thin wrapper around planner() and takes the same parameters.
|
|
|
|
*/
|
2007-02-20 17:32:18 +00:00
|
|
|
PlannedStmt *
|
2007-04-16 01:14:58 +00:00
|
|
|
pg_plan_query(Query *querytree, int cursorOptions, ParamListInfo boundParams)
|
2000-04-04 21:44:40 +00:00
|
|
|
{
|
2007-02-20 17:32:18 +00:00
|
|
|
PlannedStmt *plan;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2000-04-04 21:44:40 +00:00
|
|
|
/* Utility commands have no plans. */
|
|
|
|
if (querytree->commandType == CMD_UTILITY)
|
|
|
|
return NULL;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2008-12-13 02:00:20 +00:00
|
|
|
/* Planner must have a snapshot in case it calls user-defined functions. */
|
|
|
|
Assert(ActiveSnapshotSet());
|
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_PLAN_START();
|
|
|
|
|
2002-11-15 00:47:22 +00:00
|
|
|
if (log_planner_stats)
|
2000-04-04 21:44:40 +00:00
|
|
|
ResetUsage();
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2002-09-20 03:45:08 +00:00
|
|
|
/* call the optimizer */
|
2007-04-16 01:14:58 +00:00
|
|
|
plan = planner(querytree, cursorOptions, boundParams);
|
1997-12-11 17:36:58 +00:00
|
|
|
|
2002-11-15 00:47:22 +00:00
|
|
|
if (log_planner_stats)
|
2001-11-10 23:51:14 +00:00
|
|
|
ShowUsage("PLANNER STATISTICS");
|
1997-12-11 17:36:58 +00:00
|
|
|
|
2000-06-29 07:35:57 +00:00
|
|
|
#ifdef COPY_PARSE_PLAN_TREES
|
|
|
|
/* Optional debugging check: pass plan output through copyObject() */
|
|
|
|
{
|
2017-03-09 15:18:59 -05:00
|
|
|
PlannedStmt *new_plan = copyObject(plan);
|
2000-06-29 07:35:57 +00:00
|
|
|
|
2001-03-22 04:01:46 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* equal() currently does not have routines to compare Plan nodes, so
|
|
|
|
* don't try to test equality here. Perhaps fix someday?
|
2000-06-29 07:35:57 +00:00
|
|
|
*/
|
|
|
|
#ifdef NOT_USED
|
|
|
|
/* This checks both copyObject() and the equal() routines... */
|
2001-03-22 04:01:46 +00:00
|
|
|
if (!equal(new_plan, plan))
|
2003-10-02 06:34:04 +00:00
|
|
|
elog(WARNING, "copyObject() failed to produce an equal plan tree");
|
2000-06-29 07:35:57 +00:00
|
|
|
else
|
|
|
|
#endif
|
|
|
|
plan = new_plan;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
|
|
|
* Print plan if debugging.
|
2000-04-04 21:44:40 +00:00
|
|
|
*/
|
2000-05-31 00:28:42 +00:00
|
|
|
if (Debug_print_plan)
|
2008-08-19 18:30:04 +00:00
|
|
|
elog_node_display(LOG, "plan", plan, Debug_pretty_print);
|
1998-08-24 01:38:11 +00:00
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_PLAN_DONE();
|
|
|
|
|
2000-04-04 21:44:40 +00:00
|
|
|
return plan;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
/*
|
|
|
|
* Generate plans for a list of already-rewritten queries.
|
2000-06-28 03:33:33 +00:00
|
|
|
*
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
* For normal optimizable statements, invoke the planner. For utility
|
|
|
|
* statements, just make a wrapper PlannedStmt node.
|
|
|
|
*
|
|
|
|
* The result is a list of PlannedStmt nodes.
|
2003-05-02 20:54:36 +00:00
|
|
|
*/
|
|
|
|
List *
|
2008-12-13 02:29:22 +00:00
|
|
|
pg_plan_queries(List *querytrees, int cursorOptions, ParamListInfo boundParams)
|
2003-05-02 20:54:36 +00:00
|
|
|
{
|
2008-05-12 20:02:02 +00:00
|
|
|
List *stmt_list = NIL;
|
|
|
|
ListCell *query_list;
|
2003-05-02 20:54:36 +00:00
|
|
|
|
2008-05-12 20:02:02 +00:00
|
|
|
foreach(query_list, querytrees)
|
2003-05-02 20:54:36 +00:00
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 13:51:29 -04:00
|
|
|
Query *query = lfirst_node(Query, query_list);
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
PlannedStmt *stmt;
|
2003-05-02 20:54:36 +00:00
|
|
|
|
2008-05-12 20:02:02 +00:00
|
|
|
if (query->commandType == CMD_UTILITY)
|
2003-05-02 20:54:36 +00:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
/* Utility commands require no planning. */
|
|
|
|
stmt = makeNode(PlannedStmt);
|
|
|
|
stmt->commandType = CMD_UTILITY;
|
|
|
|
stmt->canSetTag = query->canSetTag;
|
|
|
|
stmt->utilityStmt = query->utilityStmt;
|
|
|
|
stmt->stmt_location = query->stmt_location;
|
|
|
|
stmt->stmt_len = query->stmt_len;
|
2008-05-12 20:02:02 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
stmt = pg_plan_query(query, cursorOptions, boundParams);
|
2003-05-02 20:54:36 +00:00
|
|
|
}
|
|
|
|
|
2008-05-12 20:02:02 +00:00
|
|
|
stmt_list = lappend(stmt_list, stmt);
|
2008-03-12 23:58:27 +00:00
|
|
|
}
|
2008-05-12 20:02:02 +00:00
|
|
|
|
2007-02-20 17:32:18 +00:00
|
|
|
return stmt_list;
|
2003-05-02 20:54:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2003-05-05 00:44:56 +00:00
|
|
|
* exec_simple_query
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
2003-05-02 20:54:36 +00:00
|
|
|
* Execute a "simple Query" protocol message.
|
1996-07-09 06:22:35 +00:00
|
|
|
*/
|
2003-04-29 22:13:11 +00:00
|
|
|
static void
|
2003-05-05 00:44:56 +00:00
|
|
|
exec_simple_query(const char *query_string)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2003-08-04 00:43:34 +00:00
|
|
|
CommandDest dest = whereToSendOutput;
|
2000-06-28 03:33:33 +00:00
|
|
|
MemoryContext oldcontext;
|
2004-05-26 04:41:50 +00:00
|
|
|
List *parsetree_list;
|
|
|
|
ListCell *parsetree_item;
|
2003-05-02 20:54:36 +00:00
|
|
|
bool save_log_statement_stats = log_statement_stats;
|
2005-05-24 04:18:04 +00:00
|
|
|
bool was_logged = false;
|
2007-03-22 19:55:04 +00:00
|
|
|
bool isTopLevel;
|
2006-09-07 22:52:01 +00:00
|
|
|
char msec_str[32];
|
2005-10-15 02:49:52 +00:00
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
/*
|
|
|
|
* Report query to various monitoring facilities.
|
|
|
|
*/
|
2003-04-27 20:09:44 +00:00
|
|
|
debug_query_string = query_string;
|
2001-04-14 19:11:45 +00:00
|
|
|
|
2012-01-19 14:19:20 +01:00
|
|
|
pgstat_report_activity(STATE_RUNNING, query_string);
|
2003-05-02 20:54:36 +00:00
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_START(query_string);
|
|
|
|
|
2002-09-01 23:26:06 +00:00
|
|
|
/*
|
2006-06-20 22:52:00 +00:00
|
|
|
* We use save_log_statement_stats so ShowUsage doesn't report incorrect
|
|
|
|
* results because ResetUsage wasn't called.
|
2002-09-01 23:26:06 +00:00
|
|
|
*/
|
2003-05-02 20:54:36 +00:00
|
|
|
if (save_log_statement_stats)
|
|
|
|
ResetUsage();
|
|
|
|
|
2000-07-04 06:11:54 +00:00
|
|
|
/*
|
2014-05-06 12:12:18 -04:00
|
|
|
* Start up a transaction command. All queries generated by the
|
2001-03-22 04:01:46 +00:00
|
|
|
* query_string will be in this same command block, *unless* we find a
|
2005-10-15 02:49:52 +00:00
|
|
|
* BEGIN/COMMIT/ABORT statement; we have to force a new xact command after
|
|
|
|
* one of those, else bad things will happen in xact.c. (Note that this
|
|
|
|
* will normally change current memory context.)
|
2000-10-07 00:58:23 +00:00
|
|
|
*/
|
2006-06-20 22:52:00 +00:00
|
|
|
start_xact_command();
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
/*
|
2014-05-06 12:12:18 -04:00
|
|
|
* Zap any pre-existing unnamed statement. (While not strictly necessary,
|
2005-10-15 02:49:52 +00:00
|
|
|
* it seems best to define simple-Query mode as if it used the unnamed
|
|
|
|
* statement and portal; this ensures we recover any storage used by prior
|
|
|
|
* unnamed operations.)
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2007-03-13 00:33:44 +00:00
|
|
|
drop_unnamed_stmt();
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2000-06-28 03:33:33 +00:00
|
|
|
/*
|
|
|
|
* Switch to appropriate context for constructing parsetrees.
|
|
|
|
*/
|
2003-05-02 20:54:36 +00:00
|
|
|
oldcontext = MemoryContextSwitchTo(MessageContext);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
1999-05-25 16:15:34 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Do basic parsing of the query or queries (this should be safe even if
|
|
|
|
* we are in aborted transaction state!)
|
1999-05-13 07:29:22 +00:00
|
|
|
*/
|
2003-04-29 22:13:11 +00:00
|
|
|
parsetree_list = pg_parse_query(query_string);
|
2000-04-04 21:44:40 +00:00
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Log immediately if dictated by log_statement */
|
2007-02-20 17:32:18 +00:00
|
|
|
if (check_log_statement(parsetree_list))
|
2006-09-07 22:52:01 +00:00
|
|
|
{
|
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("statement: %s", query_string),
|
2007-03-02 23:37:23 +00:00
|
|
|
errhidestmt(true),
|
2006-09-07 22:52:01 +00:00
|
|
|
errdetail_execute(parsetree_list)));
|
|
|
|
was_logged = true;
|
|
|
|
}
|
2005-05-24 04:18:04 +00:00
|
|
|
|
2000-06-28 03:33:33 +00:00
|
|
|
/*
|
2003-05-02 20:54:36 +00:00
|
|
|
* Switch back to transaction context to enter the loop.
|
2000-06-28 03:33:33 +00:00
|
|
|
*/
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
|
2007-03-22 19:55:04 +00:00
|
|
|
/*
|
2007-11-15 21:14:46 +00:00
|
|
|
* We'll tell PortalRun it's a top-level command iff there's exactly one
|
|
|
|
* raw parsetree. If more than one, it's effectively a transaction block
|
|
|
|
* and we want PreventTransactionChain to reject unsafe commands. (Note:
|
|
|
|
* we're assuming that query rewrite cannot add commands that are
|
2007-03-22 19:55:04 +00:00
|
|
|
* significant to PreventTransactionChain.)
|
|
|
|
*/
|
|
|
|
isTopLevel = (list_length(parsetree_list) == 1);
|
|
|
|
|
2000-06-28 03:33:33 +00:00
|
|
|
/*
|
2003-05-02 20:54:36 +00:00
|
|
|
* Run through the raw parsetree(s) and process each one.
|
2000-06-28 03:33:33 +00:00
|
|
|
*/
|
2000-10-07 00:58:23 +00:00
|
|
|
foreach(parsetree_item, parsetree_list)
|
2000-06-28 03:33:33 +00:00
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 13:51:29 -04:00
|
|
|
RawStmt *parsetree = lfirst_node(RawStmt, parsetree_item);
|
2008-12-13 02:00:20 +00:00
|
|
|
bool snapshot_set = false;
|
2002-02-26 22:47:12 +00:00
|
|
|
const char *commandTag;
|
|
|
|
char completionTag[COMPLETION_TAG_BUFSIZE];
|
2001-03-22 04:01:46 +00:00
|
|
|
List *querytree_list,
|
2003-05-02 20:54:36 +00:00
|
|
|
*plantree_list;
|
|
|
|
Portal portal;
|
2003-05-08 18:16:37 +00:00
|
|
|
DestReceiver *receiver;
|
|
|
|
int16 format;
|
2000-04-04 21:44:40 +00:00
|
|
|
|
2002-02-27 19:36:13 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Get the command name for use in status display (it also becomes the
|
2014-05-06 12:12:18 -04:00
|
|
|
* default completion tag, down inside PortalRun). Set ps_status and
|
2005-10-15 02:49:52 +00:00
|
|
|
* do any special start-of-SQL-command processing needed by the
|
|
|
|
* destination.
|
2002-02-27 19:36:13 +00:00
|
|
|
*/
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
commandTag = CreateCommandTag(parsetree->stmt);
|
2002-02-27 19:36:13 +00:00
|
|
|
|
2006-06-27 22:16:44 +00:00
|
|
|
set_ps_display(commandTag, false);
|
2002-02-27 19:36:13 +00:00
|
|
|
|
2003-05-06 20:26:28 +00:00
|
|
|
BeginCommand(commandTag, dest);
|
2002-02-27 19:36:13 +00:00
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/*
|
2003-05-02 20:54:36 +00:00
|
|
|
* If we are in an aborted transaction, reject all commands except
|
2005-10-15 02:49:52 +00:00
|
|
|
* COMMIT/ABORT. It is important that this test occur before we try
|
|
|
|
* to do parse analysis, rewrite, or planning, since all those phases
|
|
|
|
* try to do database accesses, which may fail in abort state. (It
|
|
|
|
* might be safe to allow some additional utility commands in this
|
|
|
|
* state, but not many...)
|
2000-10-07 00:58:23 +00:00
|
|
|
*/
|
2005-11-10 00:31:34 +00:00
|
|
|
if (IsAbortedTransactionBlockState() &&
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
!IsTransactionExitStmt(parsetree->stmt))
|
2005-11-10 00:31:34 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
|
|
|
|
errmsg("current transaction is aborted, "
|
2010-02-26 02:01:40 +00:00
|
|
|
"commands ignored until end of transaction block"),
|
2010-01-16 10:05:59 +00:00
|
|
|
errdetail_abort()));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/* Make sure we are in a transaction command */
|
2003-05-05 00:44:56 +00:00
|
|
|
start_xact_command();
|
2000-04-04 21:44:40 +00:00
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/* If we got a cancel signal in parsing or prior command, quit */
|
2001-01-14 05:08:17 +00:00
|
|
|
CHECK_FOR_INTERRUPTS();
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2008-12-13 02:00:20 +00:00
|
|
|
/*
|
|
|
|
* Set up a snapshot if parse analysis/planning will need one.
|
|
|
|
*/
|
|
|
|
if (analyze_requires_snapshot(parsetree))
|
|
|
|
{
|
|
|
|
PushActiveSnapshot(GetTransactionSnapshot());
|
|
|
|
snapshot_set = true;
|
|
|
|
}
|
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/*
|
2003-05-02 20:54:36 +00:00
|
|
|
* OK to analyze, rewrite, and plan this query.
|
2002-02-27 19:36:13 +00:00
|
|
|
*
|
2001-03-22 04:01:46 +00:00
|
|
|
* Switch to appropriate context for constructing querytrees (again,
|
|
|
|
* these must outlive the execution context).
|
2000-10-07 00:58:23 +00:00
|
|
|
*/
|
2003-05-02 20:54:36 +00:00
|
|
|
oldcontext = MemoryContextSwitchTo(MessageContext);
|
2000-04-04 21:44:40 +00:00
|
|
|
|
2006-03-14 22:48:25 +00:00
|
|
|
querytree_list = pg_analyze_and_rewrite(parsetree, query_string,
|
2017-03-31 23:17:18 -05:00
|
|
|
NULL, 0, NULL);
|
1998-12-16 11:53:55 +00:00
|
|
|
|
2015-09-16 15:38:47 -04:00
|
|
|
plantree_list = pg_plan_queries(querytree_list,
|
|
|
|
CURSOR_OPT_PARALLEL_OK, NULL);
|
2008-12-13 02:00:20 +00:00
|
|
|
|
2012-11-26 15:55:43 -05:00
|
|
|
/* Done with the snapshot used for parsing/planning */
|
|
|
|
if (snapshot_set)
|
|
|
|
PopActiveSnapshot();
|
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
/* If we got a cancel signal in analysis or planning, quit */
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/*
|
2003-08-04 00:43:34 +00:00
|
|
|
* Create unnamed portal to run the query or queries in. If there
|
|
|
|
* already is one, silently drop it.
|
2000-10-07 00:58:23 +00:00
|
|
|
*/
|
2003-05-02 20:54:36 +00:00
|
|
|
portal = CreatePortal("", true, true);
|
2006-01-18 06:49:30 +00:00
|
|
|
/* Don't display the portal in pg_cursors */
|
|
|
|
portal->visible = false;
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2008-04-02 18:31:50 +00:00
|
|
|
/*
|
|
|
|
* We don't have to copy anything into the portal, because everything
|
2008-12-13 02:00:20 +00:00
|
|
|
* we are passing here is in MessageContext, which will outlive the
|
2008-04-02 18:31:50 +00:00
|
|
|
* portal anyway.
|
|
|
|
*/
|
2003-05-02 20:54:36 +00:00
|
|
|
PortalDefineQuery(portal,
|
2006-08-08 01:23:15 +00:00
|
|
|
NULL,
|
2003-05-02 20:54:36 +00:00
|
|
|
query_string,
|
|
|
|
commandTag,
|
|
|
|
plantree_list,
|
2007-03-13 00:33:44 +00:00
|
|
|
NULL);
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
/*
|
2012-11-26 15:55:43 -05:00
|
|
|
* Start the portal. No parameters here.
|
2003-05-02 20:54:36 +00:00
|
|
|
*/
|
2012-11-26 15:55:43 -05:00
|
|
|
PortalStart(portal, NULL, 0, InvalidSnapshot);
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Select the appropriate output format: text unless we are doing a
|
2014-05-06 12:12:18 -04:00
|
|
|
* FETCH from a binary cursor. (Pretty grotty to have to do this here
|
2005-10-15 02:49:52 +00:00
|
|
|
* --- but it avoids grottiness in other places. Ah, the joys of
|
|
|
|
* backward compatibility...)
|
2003-05-08 18:16:37 +00:00
|
|
|
*/
|
|
|
|
format = 0; /* TEXT is default */
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
if (IsA(parsetree->stmt, FetchStmt))
|
2003-05-08 18:16:37 +00:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
FetchStmt *stmt = (FetchStmt *) parsetree->stmt;
|
2003-05-08 18:16:37 +00:00
|
|
|
|
|
|
|
if (!stmt->ismove)
|
|
|
|
{
|
|
|
|
Portal fportal = GetPortalByName(stmt->portalname);
|
|
|
|
|
|
|
|
if (PortalIsValid(fportal) &&
|
|
|
|
(fportal->cursorOptions & CURSOR_OPT_BINARY))
|
2003-08-04 00:43:34 +00:00
|
|
|
format = 1; /* BINARY */
|
2003-05-08 18:16:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
PortalSetResultFormat(portal, 1, &format);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we can create the destination receiver object.
|
|
|
|
*/
|
2008-11-30 20:51:25 +00:00
|
|
|
receiver = CreateDestReceiver(dest);
|
|
|
|
if (dest == DestRemote)
|
|
|
|
SetRemoteDestReceiverParams(receiver, portal);
|
2003-05-08 18:16:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Switch back to transaction context for execution.
|
|
|
|
*/
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
|
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Run the portal to completion, and then drop it (and the receiver).
|
2003-05-08 18:16:37 +00:00
|
|
|
*/
|
2003-05-05 00:44:56 +00:00
|
|
|
(void) PortalRun(portal,
|
|
|
|
FETCH_ALL,
|
2007-03-22 19:55:04 +00:00
|
|
|
isTopLevel,
|
2017-03-23 13:05:48 -04:00
|
|
|
true,
|
2003-05-06 20:26:28 +00:00
|
|
|
receiver,
|
|
|
|
receiver,
|
2003-05-05 00:44:56 +00:00
|
|
|
completionTag);
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2003-08-06 17:46:46 +00:00
|
|
|
(*receiver->rDestroy) (receiver);
|
2003-05-08 18:16:37 +00:00
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
PortalDrop(portal, false);
|
2002-02-26 22:47:12 +00:00
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
if (IsA(parsetree->stmt, TransactionStmt))
|
2003-05-02 20:54:36 +00:00
|
|
|
{
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* If this was a transaction control statement, commit it. We will
|
|
|
|
* start a new xact command for the next command (if any).
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2003-05-14 03:26:03 +00:00
|
|
|
finish_xact_command();
|
2003-05-02 20:54:36 +00:00
|
|
|
}
|
2004-05-26 04:41:50 +00:00
|
|
|
else if (lnext(parsetree_item) == NULL)
|
2002-02-27 23:16:07 +00:00
|
|
|
{
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* If this is the last parsetree of the query string, close down
|
|
|
|
* transaction statement before reporting command-complete. This
|
|
|
|
* is so that any end-of-transaction errors are reported before
|
|
|
|
* the command-complete message is issued, to avoid confusing
|
|
|
|
* clients who will expect either a command-complete message or an
|
|
|
|
* error, not one and then the other. But for compatibility with
|
|
|
|
* historical Postgres behavior, we do not force a transaction
|
|
|
|
* boundary between queries appearing in a single query string.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2003-05-14 03:26:03 +00:00
|
|
|
finish_xact_command();
|
2002-02-27 23:16:07 +00:00
|
|
|
}
|
2003-05-02 20:54:36 +00:00
|
|
|
else
|
2002-10-14 22:14:35 +00:00
|
|
|
{
|
2003-05-02 20:54:36 +00:00
|
|
|
/*
|
2003-08-04 00:43:34 +00:00
|
|
|
* We need a CommandCounterIncrement after every query, except
|
|
|
|
* those that start or end a transaction block.
|
2003-05-02 20:54:36 +00:00
|
|
|
*/
|
|
|
|
CommandCounterIncrement();
|
2002-10-14 22:14:35 +00:00
|
|
|
}
|
2002-02-26 22:47:12 +00:00
|
|
|
|
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Tell client that we're done with this query. Note we emit exactly
|
|
|
|
* one EndCommand report for each raw parsetree, thus one for each SQL
|
|
|
|
* command the client sent, regardless of rewriting. (But a command
|
|
|
|
* aborted by error will not send an EndCommand report at all.)
|
2002-02-26 22:47:12 +00:00
|
|
|
*/
|
2003-05-06 20:26:28 +00:00
|
|
|
EndCommand(completionTag, dest);
|
2001-03-22 04:01:46 +00:00
|
|
|
} /* end loop over parsetrees */
|
2000-10-07 00:58:23 +00:00
|
|
|
|
2003-05-14 03:26:03 +00:00
|
|
|
/*
|
|
|
|
* Close down transaction statement, if one is open.
|
|
|
|
*/
|
2006-06-20 22:52:00 +00:00
|
|
|
finish_xact_command();
|
2003-05-14 03:26:03 +00:00
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
/*
|
|
|
|
* If there were no parsetrees, return EmptyQueryResponse message.
|
|
|
|
*/
|
2003-03-22 04:23:34 +00:00
|
|
|
if (!parsetree_list)
|
2003-05-06 20:26:28 +00:00
|
|
|
NullCommand(dest);
|
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
/*
|
2006-06-20 22:52:00 +00:00
|
|
|
* Emit duration logging if appropriate.
|
2003-05-02 20:54:36 +00:00
|
|
|
*/
|
2006-09-08 15:55:53 +00:00
|
|
|
switch (check_log_duration(msec_str, was_logged))
|
2002-09-01 23:26:06 +00:00
|
|
|
{
|
2006-09-08 15:55:53 +00:00
|
|
|
case 1:
|
2006-09-07 22:52:01 +00:00
|
|
|
ereport(LOG,
|
2007-03-02 23:37:23 +00:00
|
|
|
(errmsg("duration: %s ms", msec_str),
|
|
|
|
errhidestmt(true)));
|
2006-09-08 15:55:53 +00:00
|
|
|
break;
|
|
|
|
case 2:
|
2006-09-07 22:52:01 +00:00
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("duration: %s ms statement: %s",
|
|
|
|
msec_str, query_string),
|
2007-03-02 23:37:23 +00:00
|
|
|
errhidestmt(true),
|
2006-09-07 22:52:01 +00:00
|
|
|
errdetail_execute(parsetree_list)));
|
2006-09-08 15:55:53 +00:00
|
|
|
break;
|
2002-09-01 23:26:06 +00:00
|
|
|
}
|
2002-09-02 05:25:37 +00:00
|
|
|
|
2003-05-02 20:54:36 +00:00
|
|
|
if (save_log_statement_stats)
|
|
|
|
ShowUsage("QUERY STATISTICS");
|
|
|
|
|
2008-08-01 13:16:09 +00:00
|
|
|
TRACE_POSTGRESQL_QUERY_DONE(query_string);
|
|
|
|
|
2002-09-02 05:25:37 +00:00
|
|
|
debug_query_string = NULL;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
|
|
|
* exec_parse_message
|
|
|
|
*
|
|
|
|
* Execute a "Parse" protocol message.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
exec_parse_message(const char *query_string, /* string to execute */
|
|
|
|
const char *stmt_name, /* name for prepared stmt */
|
2003-08-04 00:43:34 +00:00
|
|
|
Oid *paramTypes, /* parameter types */
|
|
|
|
int numParams) /* number of parameters */
|
2003-05-05 00:44:56 +00:00
|
|
|
{
|
2011-09-16 00:42:53 -04:00
|
|
|
MemoryContext unnamed_stmt_context = NULL;
|
2003-05-05 00:44:56 +00:00
|
|
|
MemoryContext oldcontext;
|
|
|
|
List *parsetree_list;
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
RawStmt *raw_parse_tree;
|
2003-05-05 00:44:56 +00:00
|
|
|
const char *commandTag;
|
2011-09-16 00:42:53 -04:00
|
|
|
List *querytree_list;
|
|
|
|
CachedPlanSource *psrc;
|
2003-05-05 00:44:56 +00:00
|
|
|
bool is_named;
|
|
|
|
bool save_log_statement_stats = log_statement_stats;
|
2006-09-07 22:52:01 +00:00
|
|
|
char msec_str[32];
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Report query to various monitoring facilities.
|
|
|
|
*/
|
|
|
|
debug_query_string = query_string;
|
|
|
|
|
2012-01-19 14:19:20 +01:00
|
|
|
pgstat_report_activity(STATE_RUNNING, query_string);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2006-06-27 22:16:44 +00:00
|
|
|
set_ps_display("PARSE", false);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
if (save_log_statement_stats)
|
|
|
|
ResetUsage();
|
|
|
|
|
2006-09-07 22:52:01 +00:00
|
|
|
ereport(DEBUG2,
|
|
|
|
(errmsg("parse %s: %s",
|
|
|
|
*stmt_name ? stmt_name : "<unnamed>",
|
|
|
|
query_string)));
|
2005-05-24 04:18:04 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Start up a transaction command so we can run parse analysis etc. (Note
|
|
|
|
* that this will normally change current memory context.) Nothing happens
|
|
|
|
* if we are already in one.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2006-06-20 22:52:00 +00:00
|
|
|
start_xact_command();
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Switch to appropriate context for constructing parsetrees.
|
|
|
|
*
|
2003-08-04 00:43:34 +00:00
|
|
|
* We have two strategies depending on whether the prepared statement is
|
|
|
|
* named or not. For a named prepared statement, we do parsing in
|
|
|
|
* MessageContext and copy the finished trees into the prepared
|
2007-03-13 00:33:44 +00:00
|
|
|
* statement's plancache entry; then the reset of MessageContext releases
|
2011-09-16 00:42:53 -04:00
|
|
|
* temporary space used by parsing and rewriting. For an unnamed prepared
|
2005-10-15 02:49:52 +00:00
|
|
|
* statement, we assume the statement isn't going to hang around long, so
|
|
|
|
* getting rid of temp space quickly is probably not worth the costs of
|
2011-09-16 00:42:53 -04:00
|
|
|
* copying parse trees. So in this case, we create the plancache entry's
|
|
|
|
* query_context here, and do all the parsing work therein.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
is_named = (stmt_name[0] != '\0');
|
|
|
|
if (is_named)
|
|
|
|
{
|
|
|
|
/* Named prepared statement --- parse in MessageContext */
|
|
|
|
oldcontext = MemoryContextSwitchTo(MessageContext);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Unnamed prepared statement --- release any prior unnamed stmt */
|
2007-03-13 00:33:44 +00:00
|
|
|
drop_unnamed_stmt();
|
2011-09-16 00:42:53 -04:00
|
|
|
/* Create context for parsing */
|
2003-05-05 00:44:56 +00:00
|
|
|
unnamed_stmt_context =
|
2011-09-16 00:42:53 -04:00
|
|
|
AllocSetContextCreate(MessageContext,
|
2003-05-05 00:44:56 +00:00
|
|
|
"unnamed prepared statement",
|
Add macros to make AllocSetContextCreate() calls simpler and safer.
I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls
had typos in the context-sizing parameters. While none of these led to
especially significant problems, they did create minor inefficiencies,
and it's now clear that expecting people to copy-and-paste those calls
accurately is not a great idea. Let's reduce the risk of future errors
by introducing single macros that encapsulate the common use-cases.
Three such macros are enough to cover all but two special-purpose contexts;
those two calls can be left as-is, I think.
While this patch doesn't in itself improve matters for third-party
extensions, it doesn't break anything for them either, and they can
gradually adopt the simplified notation over time.
In passing, change TopMemoryContext to use the default allocation
parameters. Formerly it could only be extended 8K at a time. That was
probably reasonable when this code was written; but nowadays we create
many more contexts than we did then, so that it's not unusual to have a
couple hundred K in TopMemoryContext, even without considering various
dubious code that sticks other things there. There seems no good reason
not to let it use growing blocks like most other contexts.
Back-patch to 9.6, mostly because that's still close enough to HEAD that
it's easy to do so, and keeping the branches in sync can be expected to
avoid some future back-patching pain. The bugs fixed by these changes
don't seem to be significant enough to justify fixing them further back.
Discussion: <21072.1472321324@sss.pgh.pa.us>
2016-08-27 17:50:38 -04:00
|
|
|
ALLOCSET_DEFAULT_SIZES);
|
2003-05-05 00:44:56 +00:00
|
|
|
oldcontext = MemoryContextSwitchTo(unnamed_stmt_context);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Do basic parsing of the query or queries (this should be safe even if
|
|
|
|
* we are in aborted transaction state!)
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
parsetree_list = pg_parse_query(query_string);
|
|
|
|
|
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* We only allow a single user statement in a prepared statement. This is
|
|
|
|
* mainly to keep the protocol simple --- otherwise we'd need to worry
|
|
|
|
* about multiple result tupdescs and things like that.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2004-05-26 04:41:50 +00:00
|
|
|
if (list_length(parsetree_list) > 1)
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_SYNTAX_ERROR),
|
2005-10-15 02:49:52 +00:00
|
|
|
errmsg("cannot insert multiple commands into a prepared statement")));
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
if (parsetree_list != NIL)
|
|
|
|
{
|
2007-06-23 22:12:52 +00:00
|
|
|
Query *query;
|
2008-12-13 02:00:20 +00:00
|
|
|
bool snapshot_set = false;
|
2003-08-04 00:43:34 +00:00
|
|
|
int i;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 13:51:29 -04:00
|
|
|
raw_parse_tree = linitial_node(RawStmt, parsetree_list);
|
2007-03-13 00:33:44 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
|
|
|
* Get the command name for possible use in status display.
|
|
|
|
*/
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
commandTag = CreateCommandTag(raw_parse_tree->stmt);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we are in an aborted transaction, reject all commands except
|
2005-10-15 02:49:52 +00:00
|
|
|
* COMMIT/ROLLBACK. It is important that this test occur before we
|
|
|
|
* try to do parse analysis, rewrite, or planning, since all those
|
|
|
|
* phases try to do database accesses, which may fail in abort state.
|
|
|
|
* (It might be safe to allow some additional utility commands in this
|
|
|
|
* state, but not many...)
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2005-11-10 00:31:34 +00:00
|
|
|
if (IsAbortedTransactionBlockState() &&
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
!IsTransactionExitStmt(raw_parse_tree->stmt))
|
2005-11-10 00:31:34 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
|
|
|
|
errmsg("current transaction is aborted, "
|
2010-02-26 02:01:40 +00:00
|
|
|
"commands ignored until end of transaction block"),
|
2010-01-16 10:05:59 +00:00
|
|
|
errdetail_abort()));
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2008-12-13 02:00:20 +00:00
|
|
|
/*
|
2012-06-10 15:20:04 -04:00
|
|
|
* Create the CachedPlanSource before we do parse analysis, since it
|
|
|
|
* needs to see the unmodified raw parse tree.
|
2011-09-16 00:42:53 -04:00
|
|
|
*/
|
2017-04-01 15:21:05 -05:00
|
|
|
psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
|
2011-09-16 00:42:53 -04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Set up a snapshot if parse analysis will need one.
|
2008-12-13 02:00:20 +00:00
|
|
|
*/
|
|
|
|
if (analyze_requires_snapshot(raw_parse_tree))
|
|
|
|
{
|
|
|
|
PushActiveSnapshot(GetTransactionSnapshot());
|
|
|
|
snapshot_set = true;
|
|
|
|
}
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2011-09-16 00:42:53 -04:00
|
|
|
* Analyze and rewrite the query. Note that the originally specified
|
|
|
|
* parameter set is not required to be complete, so we have to use
|
|
|
|
* parse_analyze_varparams().
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
if (log_parser_stats)
|
|
|
|
ResetUsage();
|
|
|
|
|
2011-09-16 00:42:53 -04:00
|
|
|
query = parse_analyze_varparams(raw_parse_tree,
|
2007-06-23 22:12:52 +00:00
|
|
|
query_string,
|
|
|
|
¶mTypes,
|
|
|
|
&numParams);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
/*
|
2007-03-13 00:33:44 +00:00
|
|
|
* Check all parameter types got determined.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
for (i = 0; i < numParams; i++)
|
|
|
|
{
|
2003-08-04 00:43:34 +00:00
|
|
|
Oid ptype = paramTypes[i];
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
if (ptype == InvalidOid || ptype == UNKNOWNOID)
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INDETERMINATE_DATATYPE),
|
2005-10-15 02:49:52 +00:00
|
|
|
errmsg("could not determine data type of parameter $%d",
|
|
|
|
i + 1)));
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (log_parser_stats)
|
|
|
|
ShowUsage("PARSE ANALYSIS STATISTICS");
|
|
|
|
|
2007-06-23 22:12:52 +00:00
|
|
|
querytree_list = pg_rewrite_query(query);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2011-09-16 00:42:53 -04:00
|
|
|
/* Done with the snapshot used for parsing */
|
2008-12-13 02:00:20 +00:00
|
|
|
if (snapshot_set)
|
|
|
|
PopActiveSnapshot();
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-05-06 12:12:18 -04:00
|
|
|
/* Empty input string. This is legal. */
|
2007-03-13 00:33:44 +00:00
|
|
|
raw_parse_tree = NULL;
|
2003-05-05 00:44:56 +00:00
|
|
|
commandTag = NULL;
|
2017-04-01 15:21:05 -05:00
|
|
|
psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
|
2011-09-16 00:42:53 -04:00
|
|
|
querytree_list = NIL;
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-09-16 00:42:53 -04:00
|
|
|
* CachedPlanSource must be a direct child of MessageContext before we
|
|
|
|
* reparent unnamed_stmt_context under it, else we have a disconnected
|
2012-06-10 15:20:04 -04:00
|
|
|
* circular subgraph. Klugy, but less so than flipping contexts even more
|
|
|
|
* above.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2011-09-16 00:42:53 -04:00
|
|
|
if (unnamed_stmt_context)
|
|
|
|
MemoryContextSetParent(psrc->context, MessageContext);
|
|
|
|
|
|
|
|
/* Finish filling in the CachedPlanSource */
|
|
|
|
CompleteCachedPlan(psrc,
|
|
|
|
querytree_list,
|
|
|
|
unnamed_stmt_context,
|
|
|
|
paramTypes,
|
|
|
|
numParams,
|
|
|
|
NULL,
|
|
|
|
NULL,
|
2016-02-25 13:02:18 +05:30
|
|
|
CURSOR_OPT_PARALLEL_OK, /* allow parallel mode */
|
2011-09-16 00:42:53 -04:00
|
|
|
true); /* fixed result */
|
|
|
|
|
|
|
|
/* If we got a cancel signal during analysis, quit */
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
if (is_named)
|
|
|
|
{
|
2011-09-16 00:42:53 -04:00
|
|
|
/*
|
|
|
|
* Store the query as a prepared statement.
|
|
|
|
*/
|
|
|
|
StorePreparedStatement(stmt_name, psrc, false);
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2007-03-29 19:10:10 +00:00
|
|
|
/*
|
2011-09-16 00:42:53 -04:00
|
|
|
* We just save the CachedPlanSource into unnamed_stmt_psrc.
|
2007-03-29 19:10:10 +00:00
|
|
|
*/
|
2011-09-16 00:42:53 -04:00
|
|
|
SaveCachedPlan(psrc);
|
|
|
|
unnamed_stmt_psrc = psrc;
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
|
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* We do NOT close the open transaction command here; that only happens
|
2014-05-06 12:12:18 -04:00
|
|
|
* when the client sends Sync. Instead, do CommandCounterIncrement just
|
2005-10-15 02:49:52 +00:00
|
|
|
* in case something happened during parse/plan.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
CommandCounterIncrement();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send ParseComplete.
|
|
|
|
*/
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput == DestRemote)
|
2003-05-05 00:44:56 +00:00
|
|
|
pq_putemptymessage('1');
|
|
|
|
|
2006-09-07 22:52:01 +00:00
|
|
|
/*
|
|
|
|
* Emit duration logging if appropriate.
|
|
|
|
*/
|
2006-09-08 15:55:53 +00:00
|
|
|
switch (check_log_duration(msec_str, false))
|
|
|
|
{
|
|
|
|
case 1:
|
|
|
|
ereport(LOG,
|
2007-03-02 23:37:23 +00:00
|
|
|
(errmsg("duration: %s ms", msec_str),
|
|
|
|
errhidestmt(true)));
|
2006-09-08 15:55:53 +00:00
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
ereport(LOG,
|
|
|
|
(errmsg("duration: %s ms parse %s: %s",
|
|
|
|
msec_str,
|
|
|
|
*stmt_name ? stmt_name : "<unnamed>",
|
2007-03-02 23:37:23 +00:00
|
|
|
query_string),
|
|
|
|
errhidestmt(true)));
|
2006-09-08 15:55:53 +00:00
|
|
|
break;
|
|
|
|
}
|
2006-09-07 22:52:01 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
if (save_log_statement_stats)
|
|
|
|
ShowUsage("PARSE MESSAGE STATISTICS");
|
|
|
|
|
|
|
|
debug_query_string = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* exec_bind_message
|
|
|
|
*
|
|
|
|
* Process a "Bind" message to create a portal from a prepared statement
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
exec_bind_message(StringInfo input_message)
|
|
|
|
{
|
|
|
|
const char *portal_name;
|
|
|
|
const char *stmt_name;
|
2003-05-08 18:16:37 +00:00
|
|
|
int numPFormats;
|
|
|
|
int16 *pformats = NULL;
|
2003-05-05 00:44:56 +00:00
|
|
|
int numParams;
|
2003-05-08 18:16:37 +00:00
|
|
|
int numRFormats;
|
|
|
|
int16 *rformats = NULL;
|
2007-03-13 00:33:44 +00:00
|
|
|
CachedPlanSource *psrc;
|
|
|
|
CachedPlan *cplan;
|
2003-05-05 00:44:56 +00:00
|
|
|
Portal portal;
|
2008-04-02 18:31:50 +00:00
|
|
|
char *query_string;
|
|
|
|
char *saved_stmt_name;
|
2003-05-05 00:44:56 +00:00
|
|
|
ParamListInfo params;
|
2008-04-02 18:31:50 +00:00
|
|
|
MemoryContext oldContext;
|
2006-09-07 22:52:01 +00:00
|
|
|
bool save_log_statement_stats = log_statement_stats;
|
2008-12-13 02:00:20 +00:00
|
|
|
bool snapshot_set = false;
|
2006-09-07 22:52:01 +00:00
|
|
|
char msec_str[32];
|
|
|
|
|
|
|
|
/* Get the fixed part of the message */
|
|
|
|
portal_name = pq_getmsgstring(input_message);
|
|
|
|
stmt_name = pq_getmsgstring(input_message);
|
|
|
|
|
|
|
|
ereport(DEBUG2,
|
|
|
|
(errmsg("bind %s to %s",
|
|
|
|
*portal_name ? portal_name : "<unnamed>",
|
|
|
|
*stmt_name ? stmt_name : "<unnamed>")));
|
|
|
|
|
|
|
|
/* Find prepared statement */
|
|
|
|
if (stmt_name[0] != '\0')
|
2007-03-13 00:33:44 +00:00
|
|
|
{
|
|
|
|
PreparedStatement *pstmt;
|
|
|
|
|
2006-09-07 22:52:01 +00:00
|
|
|
pstmt = FetchPreparedStatement(stmt_name, true);
|
2007-03-13 00:33:44 +00:00
|
|
|
psrc = pstmt->plansource;
|
|
|
|
}
|
2006-09-07 22:52:01 +00:00
|
|
|
else
|
|
|
|
{
|
2011-09-16 00:42:53 -04:00
|
|
|
/* special-case the unnamed statement */
|
2007-03-13 00:33:44 +00:00
|
|
|
psrc = unnamed_stmt_psrc;
|
|
|
|
if (!psrc)
|
2006-09-07 22:52:01 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
|
|
|
|
errmsg("unnamed prepared statement does not exist")));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Report query to various monitoring facilities.
|
|
|
|
*/
|
Adjust things so that the query_string of a cached plan and the sourceText of
a portal are never NULL, but reliably provide the source text of the query.
It turns out that there was only one place that was really taking a short-cut,
which was the 'EXECUTE' utility statement. That doesn't seem like a
sufficiently critical performance hotspot to justify not offering a guarantee
of validity of the portal source text. Fix it to copy the source text over
from the cached plan. Add Asserts in the places that set up cached plans and
portals to reject null source strings, and simplify a bunch of places that
formerly needed to guard against nulls.
There may be a few places that cons up statements for execution without
having any source text at all; I found one such in ConvertTriggerToFK().
It seems sufficient to inject a phony source string in such a case,
for instance
ProcessUtility((Node *) atstmt,
"(generated ALTER TABLE ADD FOREIGN KEY command)",
NULL, false, None_Receiver, NULL);
We should take a second look at the usage of debug_query_string,
particularly the recently added current_query() SQL function.
ITAGAKI Takahiro and Tom Lane
2008-07-18 20:26:06 +00:00
|
|
|
debug_query_string = psrc->query_string;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2012-01-19 14:19:20 +01:00
|
|
|
pgstat_report_activity(STATE_RUNNING, psrc->query_string);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2006-06-27 22:16:44 +00:00
|
|
|
set_ps_display("BIND", false);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2006-09-07 22:52:01 +00:00
|
|
|
if (save_log_statement_stats)
|
|
|
|
ResetUsage();
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Start up a transaction command so we can call functions etc. (Note that
|
|
|
|
* this will normally change current memory context.) Nothing happens if
|
|
|
|
* we are already in one.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2006-06-20 22:52:00 +00:00
|
|
|
start_xact_command();
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
/* Switch back to message context */
|
|
|
|
MemoryContextSwitchTo(MessageContext);
|
|
|
|
|
|
|
|
/* Get the parameter format codes */
|
|
|
|
numPFormats = pq_getmsgint(input_message, 2);
|
|
|
|
if (numPFormats > 0)
|
|
|
|
{
|
2006-10-04 00:30:14 +00:00
|
|
|
int i;
|
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
pformats = (int16 *) palloc(numPFormats * sizeof(int16));
|
|
|
|
for (i = 0; i < numPFormats; i++)
|
|
|
|
pformats[i] = pq_getmsgint(input_message, 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the parameter value count */
|
|
|
|
numParams = pq_getmsgint(input_message, 2);
|
|
|
|
|
|
|
|
if (numPFormats > 1 && numPFormats != numParams)
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
2005-10-15 02:49:52 +00:00
|
|
|
errmsg("bind message has %d parameter formats but %d parameters",
|
|
|
|
numPFormats, numParams)));
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2007-03-13 00:33:44 +00:00
|
|
|
if (numParams != psrc->num_params)
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
|
|
|
errmsg("bind message supplies %d parameters, but prepared statement \"%s\" requires %d",
|
2007-11-15 21:14:46 +00:00
|
|
|
numParams, stmt_name, psrc->num_params)));
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2005-11-10 00:31:34 +00:00
|
|
|
/*
|
|
|
|
* If we are in aborted transaction state, the only portals we can
|
2005-11-22 18:17:34 +00:00
|
|
|
* actually run are those containing COMMIT or ROLLBACK commands. We
|
|
|
|
* disallow binding anything else to avoid problems with infrastructure
|
2014-05-06 12:12:18 -04:00
|
|
|
* that expects to run inside a valid transaction. We also disallow
|
2005-11-22 18:17:34 +00:00
|
|
|
* binding any parameters, since we can't risk calling user-defined I/O
|
|
|
|
* functions.
|
2005-11-10 00:31:34 +00:00
|
|
|
*/
|
|
|
|
if (IsAbortedTransactionBlockState() &&
|
2017-01-19 19:52:13 -05:00
|
|
|
(!(psrc->raw_parse_tree &&
|
|
|
|
IsTransactionExitStmt(psrc->raw_parse_tree->stmt)) ||
|
2005-11-10 00:31:34 +00:00
|
|
|
numParams != 0))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
|
|
|
|
errmsg("current transaction is aborted, "
|
2010-01-16 10:05:59 +00:00
|
|
|
"commands ignored until end of transaction block"),
|
|
|
|
errdetail_abort()));
|
2005-11-10 00:31:34 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Create the portal. Allow silent replacement of an existing portal only
|
|
|
|
* if the unnamed portal is specified.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
if (portal_name[0] == '\0')
|
|
|
|
portal = CreatePortal(portal_name, true, true);
|
|
|
|
else
|
|
|
|
portal = CreatePortal(portal_name, false, false);
|
|
|
|
|
2008-04-02 18:31:50 +00:00
|
|
|
/*
|
|
|
|
* Prepare to copy stuff into the portal's memory context. We do all this
|
|
|
|
* copying first, because it could possibly fail (out-of-memory) and we
|
2011-09-16 00:42:53 -04:00
|
|
|
* don't want a failure to occur between GetCachedPlan and
|
2008-04-02 18:31:50 +00:00
|
|
|
* PortalDefineQuery; that would result in leaking our plancache refcount.
|
|
|
|
*/
|
|
|
|
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
|
|
|
|
|
Adjust things so that the query_string of a cached plan and the sourceText of
a portal are never NULL, but reliably provide the source text of the query.
It turns out that there was only one place that was really taking a short-cut,
which was the 'EXECUTE' utility statement. That doesn't seem like a
sufficiently critical performance hotspot to justify not offering a guarantee
of validity of the portal source text. Fix it to copy the source text over
from the cached plan. Add Asserts in the places that set up cached plans and
portals to reject null source strings, and simplify a bunch of places that
formerly needed to guard against nulls.
There may be a few places that cons up statements for execution without
having any source text at all; I found one such in ConvertTriggerToFK().
It seems sufficient to inject a phony source string in such a case,
for instance
ProcessUtility((Node *) atstmt,
"(generated ALTER TABLE ADD FOREIGN KEY command)",
NULL, false, None_Receiver, NULL);
We should take a second look at the usage of debug_query_string,
particularly the recently added current_query() SQL function.
ITAGAKI Takahiro and Tom Lane
2008-07-18 20:26:06 +00:00
|
|
|
/* Copy the plan's query string into the portal */
|
|
|
|
query_string = pstrdup(psrc->query_string);
|
2008-04-02 18:31:50 +00:00
|
|
|
|
|
|
|
/* Likewise make a copy of the statement name, unless it's unnamed */
|
|
|
|
if (stmt_name[0])
|
|
|
|
saved_stmt_name = pstrdup(stmt_name);
|
|
|
|
else
|
|
|
|
saved_stmt_name = NULL;
|
|
|
|
|
2008-12-13 02:00:20 +00:00
|
|
|
/*
|
|
|
|
* Set a snapshot if we have parameters to fetch (since the input
|
|
|
|
* functions might need it) or the query isn't a utility command (and
|
2012-06-10 15:20:04 -04:00
|
|
|
* hence could require redoing parse analysis and planning). We keep the
|
|
|
|
* snapshot active till we're done, so that plancache.c doesn't have to
|
|
|
|
* take new ones.
|
2008-12-13 02:00:20 +00:00
|
|
|
*/
|
2014-11-12 15:58:37 -05:00
|
|
|
if (numParams > 0 ||
|
|
|
|
(psrc->raw_parse_tree &&
|
|
|
|
analyze_requires_snapshot(psrc->raw_parse_tree)))
|
2008-12-13 02:00:20 +00:00
|
|
|
{
|
|
|
|
PushActiveSnapshot(GetTransactionSnapshot());
|
|
|
|
snapshot_set = true;
|
|
|
|
}
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
|
|
|
* Fetch parameters, if any, and store in the portal's memory context.
|
|
|
|
*/
|
|
|
|
if (numParams > 0)
|
|
|
|
{
|
2006-08-06 02:00:52 +00:00
|
|
|
int paramno;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2015-02-20 00:11:42 -05:00
|
|
|
params = (ParamListInfo) palloc(offsetof(ParamListInfoData, params) +
|
|
|
|
numParams * sizeof(ParamExternData));
|
2009-11-04 22:26:08 +00:00
|
|
|
/* we have static list of params, so no hooks needed */
|
|
|
|
params->paramFetch = NULL;
|
|
|
|
params->paramFetchArg = NULL;
|
|
|
|
params->parserSetup = NULL;
|
|
|
|
params->parserSetupArg = NULL;
|
2006-04-22 01:26:01 +00:00
|
|
|
params->numParams = numParams;
|
Fix problems with ParamListInfo serialization mechanism.
Commit d1b7c1ffe72e86932b5395f29e006c3f503bc53d introduced a mechanism
for serializing a ParamListInfo structure to be passed to a parallel
worker. However, this mechanism failed to handle external expanded
values, as pointed out by Noah Misch. Repair.
Moreover, plpgsql_param_fetch requires adjustment because the
serialization mechanism needs it to skip evaluating unused parameters
just as we would do when it is called from copyParamList, but params
== estate->paramLI in that case. To fix, make the bms_is_member test
in that function unconditional.
Finally, have setup_param_list set a new ParamListInfo field,
paramMask, to the parameters actually used in the expression, so that
we don't try to fetch those that are not needed when serializing a
parameter list. This isn't necessary for correctness, but it makes
the performance of the parallel executor code comparable to what we
do for cases involving cursors.
Design suggestions and extensive review by Noah Misch. Patch by me.
2015-11-02 18:11:29 -05:00
|
|
|
params->paramMask = NULL;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2007-03-13 00:33:44 +00:00
|
|
|
for (paramno = 0; paramno < numParams; paramno++)
|
2003-05-05 00:44:56 +00:00
|
|
|
{
|
2007-03-13 00:33:44 +00:00
|
|
|
Oid ptype = psrc->param_types[paramno];
|
2003-05-08 18:16:37 +00:00
|
|
|
int32 plength;
|
2006-09-06 20:40:48 +00:00
|
|
|
Datum pval;
|
2003-05-05 00:44:56 +00:00
|
|
|
bool isNull;
|
2006-04-04 19:35:37 +00:00
|
|
|
StringInfoData pbuf;
|
|
|
|
char csave;
|
|
|
|
int16 pformat;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
plength = pq_getmsgint(input_message, 4);
|
|
|
|
isNull = (plength == -1);
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
if (!isNull)
|
|
|
|
{
|
2003-05-12 16:48:17 +00:00
|
|
|
const char *pvalue = pq_getmsgbytes(input_message, plength);
|
2005-11-10 00:31:34 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Rather than copying data around, we just set up a phony
|
2005-11-22 18:17:34 +00:00
|
|
|
* StringInfo pointing to the correct portion of the message
|
2014-05-06 12:12:18 -04:00
|
|
|
* buffer. We assume we can scribble on the message buffer so
|
2005-11-22 18:17:34 +00:00
|
|
|
* as to maintain the convention that StringInfos have a
|
|
|
|
* trailing null. This is grotty but is a big win when
|
|
|
|
* dealing with very large parameter strings.
|
2005-11-10 00:31:34 +00:00
|
|
|
*/
|
|
|
|
pbuf.data = (char *) pvalue;
|
|
|
|
pbuf.maxlen = plength + 1;
|
|
|
|
pbuf.len = plength;
|
|
|
|
pbuf.cursor = 0;
|
|
|
|
|
|
|
|
csave = pbuf.data[plength];
|
|
|
|
pbuf.data[plength] = '\0';
|
2006-04-04 19:35:37 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
pbuf.data = NULL; /* keep compiler quiet */
|
|
|
|
csave = 0;
|
|
|
|
}
|
2005-11-10 00:31:34 +00:00
|
|
|
|
2006-04-04 19:35:37 +00:00
|
|
|
if (numPFormats > 1)
|
2006-08-06 02:00:52 +00:00
|
|
|
pformat = pformats[paramno];
|
2006-04-04 19:35:37 +00:00
|
|
|
else if (numPFormats > 0)
|
|
|
|
pformat = pformats[0];
|
|
|
|
else
|
|
|
|
pformat = 0; /* default = text */
|
|
|
|
|
2006-08-08 01:23:15 +00:00
|
|
|
if (pformat == 0) /* text mode */
|
2006-04-04 19:35:37 +00:00
|
|
|
{
|
|
|
|
Oid typinput;
|
|
|
|
Oid typioparam;
|
2006-09-07 22:52:01 +00:00
|
|
|
char *pstring;
|
2005-11-10 00:31:34 +00:00
|
|
|
|
2006-04-04 19:35:37 +00:00
|
|
|
getTypeInputInfo(ptype, &typinput, &typioparam);
|
2005-11-10 00:31:34 +00:00
|
|
|
|
2006-04-04 19:35:37 +00:00
|
|
|
/*
|
|
|
|
* We have to do encoding conversion before calling the
|
|
|
|
* typinput routine.
|
|
|
|
*/
|
|
|
|
if (isNull)
|
|
|
|
pstring = NULL;
|
|
|
|
else
|
2005-11-10 00:31:34 +00:00
|
|
|
pstring = pg_client_to_server(pbuf.data, plength);
|
2006-04-04 19:35:37 +00:00
|
|
|
|
2006-09-06 20:40:48 +00:00
|
|
|
pval = OidInputFunctionCall(typinput, pstring, typioparam, -1);
|
2006-08-08 01:23:15 +00:00
|
|
|
|
2006-04-04 19:35:37 +00:00
|
|
|
/* Free result of encoding conversion, if any */
|
|
|
|
if (pstring && pstring != pbuf.data)
|
|
|
|
pfree(pstring);
|
|
|
|
}
|
2006-10-04 00:30:14 +00:00
|
|
|
else if (pformat == 1) /* binary mode */
|
2006-04-04 19:35:37 +00:00
|
|
|
{
|
|
|
|
Oid typreceive;
|
|
|
|
Oid typioparam;
|
|
|
|
StringInfo bufptr;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call the parameter type's binary input converter
|
|
|
|
*/
|
|
|
|
getTypeBinaryInputInfo(ptype, &typreceive, &typioparam);
|
|
|
|
|
|
|
|
if (isNull)
|
|
|
|
bufptr = NULL;
|
2005-11-10 00:31:34 +00:00
|
|
|
else
|
2006-04-04 19:35:37 +00:00
|
|
|
bufptr = &pbuf;
|
|
|
|
|
2006-09-06 20:40:48 +00:00
|
|
|
pval = OidReceiveFunctionCall(typreceive, bufptr, typioparam, -1);
|
2006-04-04 19:35:37 +00:00
|
|
|
|
|
|
|
/* Trouble if it didn't eat the whole buffer */
|
|
|
|
if (!isNull && pbuf.cursor != pbuf.len)
|
2005-11-10 00:31:34 +00:00
|
|
|
ereport(ERROR,
|
2006-04-04 19:35:37 +00:00
|
|
|
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
|
|
|
|
errmsg("incorrect binary data format in bind parameter %d",
|
2006-08-06 02:00:52 +00:00
|
|
|
paramno + 1)));
|
2006-04-04 19:35:37 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("unsupported format code: %d",
|
|
|
|
pformat)));
|
2006-09-06 20:40:48 +00:00
|
|
|
pval = 0; /* keep compiler quiet */
|
2006-04-04 19:35:37 +00:00
|
|
|
}
|
2005-11-10 00:31:34 +00:00
|
|
|
|
2006-04-04 19:35:37 +00:00
|
|
|
/* Restore message buffer contents */
|
|
|
|
if (!isNull)
|
2005-11-10 00:31:34 +00:00
|
|
|
pbuf.data[plength] = csave;
|
2003-05-08 18:16:37 +00:00
|
|
|
|
2006-09-06 20:40:48 +00:00
|
|
|
params->params[paramno].value = pval;
|
2006-08-06 02:00:52 +00:00
|
|
|
params->params[paramno].isnull = isNull;
|
2006-10-04 00:30:14 +00:00
|
|
|
|
2006-09-06 20:40:48 +00:00
|
|
|
/*
|
2012-06-10 15:20:04 -04:00
|
|
|
* We mark the params as CONST. This ensures that any custom plan
|
|
|
|
* makes full use of the parameter values.
|
2006-09-06 20:40:48 +00:00
|
|
|
*/
|
|
|
|
params->params[paramno].pflags = PARAM_FLAG_CONST;
|
2006-08-06 02:00:52 +00:00
|
|
|
params->params[paramno].ptype = ptype;
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
params = NULL;
|
|
|
|
|
2008-04-02 18:31:50 +00:00
|
|
|
/* Done storing stuff in portal's context */
|
|
|
|
MemoryContextSwitchTo(oldContext);
|
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
/* Get the result format codes */
|
|
|
|
numRFormats = pq_getmsgint(input_message, 2);
|
|
|
|
if (numRFormats > 0)
|
|
|
|
{
|
2006-10-04 00:30:14 +00:00
|
|
|
int i;
|
2006-08-06 02:00:52 +00:00
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
rformats = (int16 *) palloc(numRFormats * sizeof(int16));
|
|
|
|
for (i = 0; i < numRFormats; i++)
|
|
|
|
rformats[i] = pq_getmsgint(input_message, 2);
|
|
|
|
}
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
pq_getmsgend(input_message);
|
|
|
|
|
2011-09-16 00:42:53 -04:00
|
|
|
/*
|
|
|
|
* Obtain a plan from the CachedPlanSource. Any cruft from (re)planning
|
|
|
|
* will be generated in MessageContext. The plan refcount will be
|
|
|
|
* assigned to the Portal, so it will be released at portal destruction.
|
|
|
|
*/
|
2017-03-31 23:17:18 -05:00
|
|
|
cplan = GetCachedPlan(psrc, params, false, NULL);
|
2004-06-11 01:09:22 +00:00
|
|
|
|
|
|
|
/*
|
2009-01-01 17:12:16 +00:00
|
|
|
* Now we can define the portal.
|
|
|
|
*
|
|
|
|
* DO NOT put any code that could possibly throw an error between the
|
2011-09-16 00:42:53 -04:00
|
|
|
* above GetCachedPlan call and here.
|
2004-06-11 01:09:22 +00:00
|
|
|
*/
|
|
|
|
PortalDefineQuery(portal,
|
2008-04-02 18:31:50 +00:00
|
|
|
saved_stmt_name,
|
|
|
|
query_string,
|
2007-03-13 00:33:44 +00:00
|
|
|
psrc->commandTag,
|
2011-09-16 00:42:53 -04:00
|
|
|
cplan->stmt_list,
|
2007-03-13 00:33:44 +00:00
|
|
|
cplan);
|
2004-06-11 01:09:22 +00:00
|
|
|
|
2011-12-21 09:16:55 -05:00
|
|
|
/* Done with the snapshot used for parameter I/O and parsing/planning */
|
|
|
|
if (snapshot_set)
|
|
|
|
PopActiveSnapshot();
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2012-11-26 15:55:43 -05:00
|
|
|
/*
|
|
|
|
* And we're ready to start portal execution.
|
|
|
|
*/
|
|
|
|
PortalStart(portal, params, 0, InvalidSnapshot);
|
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
/*
|
|
|
|
* Apply the result format requests to the portal.
|
|
|
|
*/
|
|
|
|
PortalSetResultFormat(portal, numRFormats, rformats);
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
|
|
|
* Send BindComplete.
|
|
|
|
*/
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput == DestRemote)
|
2003-05-05 00:44:56 +00:00
|
|
|
pq_putemptymessage('2');
|
2006-09-07 22:52:01 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Emit duration logging if appropriate.
|
|
|
|
*/
|
2006-09-08 15:55:53 +00:00
|
|
|
switch (check_log_duration(msec_str, false))
|
|
|
|
{
|
|
|
|
case 1:
|
|
|
|
ereport(LOG,
|
2007-03-02 23:37:23 +00:00
|
|
|
(errmsg("duration: %s ms", msec_str),
|
|
|
|
errhidestmt(true)));
|
2006-09-08 15:55:53 +00:00
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
ereport(LOG,
|
2006-09-13 21:59:04 +00:00
|
|
|
(errmsg("duration: %s ms bind %s%s%s: %s",
|
2006-09-08 15:55:53 +00:00
|
|
|
msec_str,
|
|
|
|
*stmt_name ? stmt_name : "<unnamed>",
|
2006-09-13 21:59:04 +00:00
|
|
|
*portal_name ? "/" : "",
|
|
|
|
*portal_name ? portal_name : "",
|
Adjust things so that the query_string of a cached plan and the sourceText of
a portal are never NULL, but reliably provide the source text of the query.
It turns out that there was only one place that was really taking a short-cut,
which was the 'EXECUTE' utility statement. That doesn't seem like a
sufficiently critical performance hotspot to justify not offering a guarantee
of validity of the portal source text. Fix it to copy the source text over
from the cached plan. Add Asserts in the places that set up cached plans and
portals to reject null source strings, and simplify a bunch of places that
formerly needed to guard against nulls.
There may be a few places that cons up statements for execution without
having any source text at all; I found one such in ConvertTriggerToFK().
It seems sufficient to inject a phony source string in such a case,
for instance
ProcessUtility((Node *) atstmt,
"(generated ALTER TABLE ADD FOREIGN KEY command)",
NULL, false, None_Receiver, NULL);
We should take a second look at the usage of debug_query_string,
particularly the recently added current_query() SQL function.
ITAGAKI Takahiro and Tom Lane
2008-07-18 20:26:06 +00:00
|
|
|
psrc->query_string),
|
2007-03-02 23:37:23 +00:00
|
|
|
errhidestmt(true),
|
2006-09-08 15:55:53 +00:00
|
|
|
errdetail_params(params)));
|
|
|
|
break;
|
|
|
|
}
|
2006-09-07 22:52:01 +00:00
|
|
|
|
|
|
|
if (save_log_statement_stats)
|
|
|
|
ShowUsage("BIND MESSAGE STATISTICS");
|
|
|
|
|
|
|
|
debug_query_string = NULL;
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* exec_execute_message
|
|
|
|
*
|
|
|
|
* Process an "Execute" message for a portal
|
|
|
|
*/
|
|
|
|
static void
exec_execute_message(const char *portal_name, long max_rows)
{
	CommandDest dest;
	DestReceiver *receiver;
	Portal		portal;
	bool		completed;
	char		completionTag[COMPLETION_TAG_BUFSIZE];
	const char *sourceText;
	const char *prepStmtName;
	ParamListInfo portalParams;
	bool		save_log_statement_stats = log_statement_stats;
	bool		is_xact_command;
	bool		execute_is_fetch;
	bool		was_logged = false;
	char		msec_str[32];

	/* Adjust destination to tell printtup.c what to do */
	dest = whereToSendOutput;
	if (dest == DestRemote)
		dest = DestRemoteExecute;

	portal = GetPortalByName(portal_name);
	if (!PortalIsValid(portal))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_CURSOR),
				 errmsg("portal \"%s\" does not exist", portal_name)));

	/*
	 * If the original query was a null string, just return
	 * EmptyQueryResponse.
	 */
	if (portal->commandTag == NULL)
	{
		Assert(portal->stmts == NIL);
		NullCommand(dest);
		return;
	}

	/* Does the portal contain a transaction command? */
	is_xact_command = IsTransactionStmtList(portal->stmts);

	/*
	 * We must copy the sourceText and prepStmtName into MessageContext in
	 * case the portal is destroyed during finish_xact_command.  Can avoid the
	 * copy if it's not an xact command, though.
	 */
	if (is_xact_command)
	{
		sourceText = pstrdup(portal->sourceText);
		if (portal->prepStmtName)
			prepStmtName = pstrdup(portal->prepStmtName);
		else
			prepStmtName = "<unnamed>";

		/*
		 * An xact command shouldn't have any parameters, which is a good
		 * thing because they wouldn't be around after finish_xact_command.
		 */
		portalParams = NULL;
	}
	else
	{
		sourceText = portal->sourceText;
		if (portal->prepStmtName)
			prepStmtName = portal->prepStmtName;
		else
			prepStmtName = "<unnamed>";
		portalParams = portal->portalParams;
	}

	/*
	 * Report query to various monitoring facilities.
	 */
	debug_query_string = sourceText;

	pgstat_report_activity(STATE_RUNNING, sourceText);

	set_ps_display(portal->commandTag, false);

	if (save_log_statement_stats)
		ResetUsage();

	BeginCommand(portal->commandTag, dest);

	/*
	 * Create dest receiver in MessageContext (we don't want it in transaction
	 * context, because that may get deleted if portal contains VACUUM).
	 */
	receiver = CreateDestReceiver(dest);
	if (dest == DestRemoteExecute)
		SetRemoteDestReceiverParams(receiver, portal);

	/*
	 * Ensure we are in a transaction command (this should normally be the
	 * case already due to prior BIND).
	 */
	start_xact_command();

	/*
	 * If we re-issue an Execute protocol request against an existing portal,
	 * then we are only fetching more rows rather than completely re-executing
	 * the query from the start. atStart is never reset for a v3 portal, so we
	 * are safe to use this check.
	 */
	execute_is_fetch = !portal->atStart;

	/* Log immediately if dictated by log_statement */
	if (check_log_statement(portal->stmts))
	{
		ereport(LOG,
				(errmsg("%s %s%s%s: %s",
						execute_is_fetch ?
						_("execute fetch from") :
						_("execute"),
						prepStmtName,
						*portal_name ? "/" : "",
						*portal_name ? portal_name : "",
						sourceText),
				 errhidestmt(true),
				 errdetail_params(portalParams)));
		was_logged = true;
	}

	/*
	 * If we are in aborted transaction state, the only portals we can
	 * actually run are those containing COMMIT or ROLLBACK commands.
	 */
	if (IsAbortedTransactionBlockState() &&
		!IsTransactionExitStmtList(portal->stmts))
		ereport(ERROR,
				(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
				 errmsg("current transaction is aborted, "
						"commands ignored until end of transaction block"),
				 errdetail_abort()));

	/* Check for cancel signal before we start execution */
	CHECK_FOR_INTERRUPTS();

	/*
	 * Okay to run the portal.  A nonpositive row limit from the client means
	 * "no limit", i.e. fetch everything.
	 */
	if (max_rows <= 0)
		max_rows = FETCH_ALL;

	completed = PortalRun(portal,
						  max_rows,
						  true, /* always top level */
						  !execute_is_fetch && max_rows == FETCH_ALL,
						  receiver,
						  receiver,
						  completionTag);

	(*receiver->rDestroy) (receiver);

	if (completed)
	{
		if (is_xact_command)
		{
			/*
			 * If this was a transaction control statement, commit it.  We
			 * will start a new xact command for the next command (if any).
			 */
			finish_xact_command();
		}
		else
		{
			/*
			 * We need a CommandCounterIncrement after every query, except
			 * those that start or end a transaction block.
			 */
			CommandCounterIncrement();
		}

		/* Send appropriate CommandComplete to client */
		EndCommand(completionTag, dest);
	}
	else
	{
		/* Portal run not complete, so send PortalSuspended */
		if (whereToSendOutput == DestRemote)
			pq_putemptymessage('s');
	}

	/*
	 * Emit duration logging if appropriate.
	 */
	switch (check_log_duration(msec_str, was_logged))
	{
		case 1:
			ereport(LOG,
					(errmsg("duration: %s ms", msec_str),
					 errhidestmt(true)));
			break;
		case 2:
			ereport(LOG,
					(errmsg("duration: %s ms %s %s%s%s: %s",
							msec_str,
							execute_is_fetch ?
							_("execute fetch from") :
							_("execute"),
							prepStmtName,
							*portal_name ? "/" : "",
							*portal_name ? portal_name : "",
							sourceText),
					 errhidestmt(true),
					 errdetail_params(portalParams)));
			break;
	}

	if (save_log_statement_stats)
		ShowUsage("EXECUTE MESSAGE STATISTICS");

	debug_query_string = NULL;
}
|
|
|
|
|
|
|
|
/*
|
2007-02-20 17:32:18 +00:00
|
|
|
* check_log_statement
|
2006-09-07 22:52:01 +00:00
|
|
|
* Determine whether command should be logged because of log_statement
|
|
|
|
*
|
2014-07-07 19:39:42 +09:00
|
|
|
* stmt_list can be either raw grammar output or a list of planned
|
2007-02-20 17:32:18 +00:00
|
|
|
* statements
|
2006-09-07 22:52:01 +00:00
|
|
|
*/
|
|
|
|
static bool
|
2007-02-20 17:32:18 +00:00
|
|
|
check_log_statement(List *stmt_list)
|
2006-09-07 22:52:01 +00:00
|
|
|
{
|
2007-02-20 17:32:18 +00:00
|
|
|
ListCell *stmt_item;
|
2006-09-07 22:52:01 +00:00
|
|
|
|
|
|
|
if (log_statement == LOGSTMT_NONE)
|
|
|
|
return false;
|
|
|
|
if (log_statement == LOGSTMT_ALL)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/* Else we have to inspect the statement(s) to see whether to log */
|
2007-02-20 17:32:18 +00:00
|
|
|
foreach(stmt_item, stmt_list)
|
2006-09-07 22:52:01 +00:00
|
|
|
{
|
2007-02-20 17:32:18 +00:00
|
|
|
Node *stmt = (Node *) lfirst(stmt_item);
|
2006-09-07 22:52:01 +00:00
|
|
|
|
2007-02-20 17:32:18 +00:00
|
|
|
if (GetCommandLogLevel(stmt) <= log_statement)
|
2006-09-07 22:52:01 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* check_log_duration
|
|
|
|
* Determine whether current command's duration should be logged
|
|
|
|
*
|
2006-09-08 15:55:53 +00:00
|
|
|
* Returns:
|
|
|
|
* 0 if no logging is needed
|
|
|
|
* 1 if just the duration should be logged
|
|
|
|
* 2 if duration and query details should be logged
|
|
|
|
*
|
2006-09-07 22:52:01 +00:00
|
|
|
* If logging is needed, the duration in msec is formatted into msec_str[],
|
|
|
|
* which must be a 32-byte buffer.
|
2006-09-08 15:55:53 +00:00
|
|
|
*
|
|
|
|
* was_logged should be TRUE if caller already logged query details (this
|
|
|
|
* essentially prevents 2 from being returned).
|
2006-09-07 22:52:01 +00:00
|
|
|
*/
|
2006-09-08 15:55:53 +00:00
|
|
|
int
|
|
|
|
check_log_duration(char *msec_str, bool was_logged)
|
2006-09-07 22:52:01 +00:00
|
|
|
{
|
2006-06-20 22:52:00 +00:00
|
|
|
if (log_duration || log_min_duration_statement >= 0)
|
2005-05-24 04:18:04 +00:00
|
|
|
{
|
2006-06-20 22:52:00 +00:00
|
|
|
long secs;
|
|
|
|
int usecs;
|
|
|
|
int msecs;
|
2006-09-08 15:55:53 +00:00
|
|
|
bool exceeded;
|
2005-05-24 04:18:04 +00:00
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
TimestampDifference(GetCurrentStatementStartTimestamp(),
|
|
|
|
GetCurrentTimestamp(),
|
|
|
|
&secs, &usecs);
|
|
|
|
msecs = usecs / 1000;
|
2005-05-24 04:18:04 +00:00
|
|
|
|
|
|
|
/*
|
2006-10-04 00:30:14 +00:00
|
|
|
* This odd-looking test for log_min_duration_statement being exceeded
|
|
|
|
* is designed to avoid integer overflow with very long durations:
|
|
|
|
* don't compute secs * 1000 until we've verified it will fit in int.
|
2005-05-24 04:18:04 +00:00
|
|
|
*/
|
2006-09-08 15:55:53 +00:00
|
|
|
exceeded = (log_min_duration_statement == 0 ||
|
|
|
|
(log_min_duration_statement > 0 &&
|
|
|
|
(secs > log_min_duration_statement / 1000 ||
|
|
|
|
secs * 1000 + msecs >= log_min_duration_statement)));
|
|
|
|
|
|
|
|
if (exceeded || log_duration)
|
2006-06-20 22:52:00 +00:00
|
|
|
{
|
2006-09-07 22:52:01 +00:00
|
|
|
snprintf(msec_str, 32, "%ld.%03d",
|
|
|
|
secs * 1000 + msecs, usecs % 1000);
|
2006-09-08 15:55:53 +00:00
|
|
|
if (exceeded && !was_logged)
|
|
|
|
return 2;
|
|
|
|
else
|
|
|
|
return 1;
|
2006-06-20 22:52:00 +00:00
|
|
|
}
|
2005-05-24 04:18:04 +00:00
|
|
|
}
|
|
|
|
|
2006-09-08 15:55:53 +00:00
|
|
|
return 0;
|
2006-09-07 22:52:01 +00:00
|
|
|
}
|
2005-05-24 04:18:04 +00:00
|
|
|
|
2006-09-07 22:52:01 +00:00
|
|
|
/*
|
|
|
|
* errdetail_execute
|
|
|
|
*
|
|
|
|
* Add an errdetail() line showing the query referenced by an EXECUTE, if any.
|
|
|
|
* The argument is the raw parsetree list.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
errdetail_execute(List *raw_parsetree_list)
|
|
|
|
{
|
|
|
|
ListCell *parsetree_item;
|
|
|
|
|
|
|
|
foreach(parsetree_item, raw_parsetree_list)
|
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 13:51:29 -04:00
|
|
|
RawStmt *parsetree = lfirst_node(RawStmt, parsetree_item);
|
2006-09-07 22:52:01 +00:00
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
if (IsA(parsetree->stmt, ExecuteStmt))
|
2006-09-07 22:52:01 +00:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
ExecuteStmt *stmt = (ExecuteStmt *) parsetree->stmt;
|
2006-09-07 22:52:01 +00:00
|
|
|
PreparedStatement *pstmt;
|
|
|
|
|
|
|
|
pstmt = FetchPreparedStatement(stmt->name, false);
|
Adjust things so that the query_string of a cached plan and the sourceText of
a portal are never NULL, but reliably provide the source text of the query.
It turns out that there was only one place that was really taking a short-cut,
which was the 'EXECUTE' utility statement. That doesn't seem like a
sufficiently critical performance hotspot to justify not offering a guarantee
of validity of the portal source text. Fix it to copy the source text over
from the cached plan. Add Asserts in the places that set up cached plans and
portals to reject null source strings, and simplify a bunch of places that
formerly needed to guard against nulls.
There may be a few places that cons up statements for execution without
having any source text at all; I found one such in ConvertTriggerToFK().
It seems sufficient to inject a phony source string in such a case,
for instance
ProcessUtility((Node *) atstmt,
"(generated ALTER TABLE ADD FOREIGN KEY command)",
NULL, false, None_Receiver, NULL);
We should take a second look at the usage of debug_query_string,
particularly the recently added current_query() SQL function.
ITAGAKI Takahiro and Tom Lane
2008-07-18 20:26:06 +00:00
|
|
|
if (pstmt)
|
2006-09-07 22:52:01 +00:00
|
|
|
{
|
2007-03-13 00:33:44 +00:00
|
|
|
errdetail("prepare: %s", pstmt->plansource->query_string);
|
2006-09-07 22:52:01 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* errdetail_params
|
|
|
|
*
|
|
|
|
* Add an errdetail() line showing bind-parameter data, if available.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
errdetail_params(ParamListInfo params)
|
|
|
|
{
|
|
|
|
/* We mustn't call user-defined I/O functions when in an aborted xact */
|
|
|
|
if (params && params->numParams > 0 && !IsAbortedTransactionBlockState())
|
|
|
|
{
|
|
|
|
StringInfoData param_str;
|
|
|
|
MemoryContext oldcontext;
|
|
|
|
int paramno;
|
|
|
|
|
|
|
|
/* Make sure any trash is generated in MessageContext */
|
|
|
|
oldcontext = MemoryContextSwitchTo(MessageContext);
|
|
|
|
|
|
|
|
initStringInfo(¶m_str);
|
|
|
|
|
|
|
|
for (paramno = 0; paramno < params->numParams; paramno++)
|
|
|
|
{
|
|
|
|
ParamExternData *prm = ¶ms->params[paramno];
|
|
|
|
Oid typoutput;
|
|
|
|
bool typisvarlena;
|
|
|
|
char *pstring;
|
|
|
|
char *p;
|
|
|
|
|
|
|
|
appendStringInfo(¶m_str, "%s$%d = ",
|
|
|
|
paramno > 0 ? ", " : "",
|
|
|
|
paramno + 1);
|
|
|
|
|
|
|
|
if (prm->isnull || !OidIsValid(prm->ptype))
|
|
|
|
{
|
|
|
|
appendStringInfoString(¶m_str, "NULL");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
getTypeOutputInfo(prm->ptype, &typoutput, &typisvarlena);
|
|
|
|
|
|
|
|
pstring = OidOutputFunctionCall(typoutput, prm->value);
|
|
|
|
|
|
|
|
appendStringInfoCharMacro(¶m_str, '\'');
|
|
|
|
for (p = pstring; *p; p++)
|
|
|
|
{
|
2006-10-04 00:30:14 +00:00
|
|
|
if (*p == '\'') /* double single quotes */
|
2006-09-07 22:52:01 +00:00
|
|
|
appendStringInfoCharMacro(¶m_str, *p);
|
|
|
|
appendStringInfoCharMacro(¶m_str, *p);
|
|
|
|
}
|
|
|
|
appendStringInfoCharMacro(¶m_str, '\'');
|
|
|
|
|
|
|
|
pfree(pstring);
|
|
|
|
}
|
|
|
|
|
|
|
|
errdetail("parameters: %s", param_str.data);
|
|
|
|
|
|
|
|
pfree(param_str.data);
|
|
|
|
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
|
2010-01-16 10:05:59 +00:00
|
|
|
/*
|
|
|
|
* errdetail_abort
|
|
|
|
*
|
|
|
|
* Add an errdetail() line showing abort reason, if any.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
errdetail_abort(void)
|
|
|
|
{
|
|
|
|
if (MyProc->recoveryConflictPending)
|
|
|
|
errdetail("abort reason: recovery conflict");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-01-23 17:04:05 +00:00
|
|
|
/*
|
|
|
|
* errdetail_recovery_conflict
|
|
|
|
*
|
|
|
|
* Add an errdetail() line showing conflict source.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
errdetail_recovery_conflict(void)
|
|
|
|
{
|
|
|
|
switch (RecoveryConflictReason)
|
|
|
|
{
|
|
|
|
case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:
|
2010-02-26 02:01:40 +00:00
|
|
|
errdetail("User was holding shared buffer pin for too long.");
|
|
|
|
break;
|
2010-01-23 17:04:05 +00:00
|
|
|
case PROCSIG_RECOVERY_CONFLICT_LOCK:
|
2010-02-26 02:01:40 +00:00
|
|
|
errdetail("User was holding a relation lock for too long.");
|
|
|
|
break;
|
2010-01-23 17:04:05 +00:00
|
|
|
case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
|
2010-03-21 00:17:59 +00:00
|
|
|
errdetail("User was or might have been using tablespace that must be dropped.");
|
2010-02-26 02:01:40 +00:00
|
|
|
break;
|
2010-01-23 17:04:05 +00:00
|
|
|
case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:
|
2010-02-26 02:01:40 +00:00
|
|
|
errdetail("User query might have needed to see row versions that must be removed.");
|
|
|
|
break;
|
2010-02-13 01:32:20 +00:00
|
|
|
case PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK:
|
2010-02-26 02:01:40 +00:00
|
|
|
errdetail("User transaction caused buffer deadlock with recovery.");
|
|
|
|
break;
|
2010-01-23 17:04:05 +00:00
|
|
|
case PROCSIG_RECOVERY_CONFLICT_DATABASE:
|
2010-02-26 02:01:40 +00:00
|
|
|
errdetail("User was connected to a database that must be dropped.");
|
|
|
|
break;
|
2010-01-23 17:04:05 +00:00
|
|
|
default:
|
2010-02-26 02:01:40 +00:00
|
|
|
break;
|
|
|
|
/* no errdetail */
|
2010-01-23 17:04:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
|
|
|
* exec_describe_statement_message
|
|
|
|
*
|
|
|
|
* Process a "Describe" message for a prepared statement
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
exec_describe_statement_message(const char *stmt_name)
|
|
|
|
{
|
2007-03-13 00:33:44 +00:00
|
|
|
CachedPlanSource *psrc;
|
2003-05-05 00:44:56 +00:00
|
|
|
StringInfoData buf;
|
2007-03-13 00:33:44 +00:00
|
|
|
int i;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2005-12-14 17:06:28 +00:00
|
|
|
/*
|
|
|
|
* Start up a transaction command. (Note that this will normally change
|
|
|
|
* current memory context.) Nothing happens if we are already in one.
|
|
|
|
*/
|
2006-06-20 22:52:00 +00:00
|
|
|
start_xact_command();
|
2005-12-14 17:06:28 +00:00
|
|
|
|
|
|
|
/* Switch back to message context */
|
|
|
|
MemoryContextSwitchTo(MessageContext);
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/* Find prepared statement */
|
|
|
|
if (stmt_name[0] != '\0')
|
2007-03-13 00:33:44 +00:00
|
|
|
{
|
|
|
|
PreparedStatement *pstmt;
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
pstmt = FetchPreparedStatement(stmt_name, true);
|
2007-03-13 00:33:44 +00:00
|
|
|
psrc = pstmt->plansource;
|
|
|
|
}
|
2003-05-05 00:44:56 +00:00
|
|
|
else
|
|
|
|
{
|
|
|
|
/* special-case the unnamed statement */
|
2007-03-13 00:33:44 +00:00
|
|
|
psrc = unnamed_stmt_psrc;
|
|
|
|
if (!psrc)
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
|
2005-10-15 02:49:52 +00:00
|
|
|
errmsg("unnamed prepared statement does not exist")));
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
|
2007-03-13 00:33:44 +00:00
|
|
|
/* Prepared statements shouldn't have changeable result descs */
|
|
|
|
Assert(psrc->fixed_result);
|
|
|
|
|
2005-12-14 17:06:28 +00:00
|
|
|
/*
|
2007-03-13 00:33:44 +00:00
|
|
|
* If we are in aborted transaction state, we can't run
|
2011-09-16 00:42:53 -04:00
|
|
|
* SendRowDescriptionMessage(), because that needs catalog accesses.
|
2007-03-13 00:33:44 +00:00
|
|
|
* Hence, refuse to Describe statements that return data. (We shouldn't
|
|
|
|
* just refuse all Describes, since that might break the ability of some
|
|
|
|
* clients to issue COMMIT or ROLLBACK commands, if they use code that
|
|
|
|
* blindly Describes whatever it does.) We can Describe parameters
|
|
|
|
* without doing anything dangerous, so we don't restrict that.
|
2005-12-14 17:06:28 +00:00
|
|
|
*/
|
|
|
|
if (IsAbortedTransactionBlockState() &&
|
2007-03-13 00:33:44 +00:00
|
|
|
psrc->resultDesc)
|
2005-12-14 17:06:28 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
|
|
|
|
errmsg("current transaction is aborted, "
|
2010-01-16 10:05:59 +00:00
|
|
|
"commands ignored until end of transaction block"),
|
|
|
|
errdetail_abort()));
|
2005-12-14 17:06:28 +00:00
|
|
|
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput != DestRemote)
|
2003-05-05 00:44:56 +00:00
|
|
|
return; /* can't actually do anything... */
|
|
|
|
|
2003-05-06 21:51:42 +00:00
|
|
|
/*
|
|
|
|
* First describe the parameters...
|
|
|
|
*/
|
2003-08-04 00:43:34 +00:00
|
|
|
pq_beginmessage(&buf, 't'); /* parameter description message type */
|
2007-03-13 00:33:44 +00:00
|
|
|
pq_sendint(&buf, psrc->num_params, 2);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2007-03-13 00:33:44 +00:00
|
|
|
for (i = 0; i < psrc->num_params; i++)
|
2003-05-05 00:44:56 +00:00
|
|
|
{
|
2007-03-13 00:33:44 +00:00
|
|
|
Oid ptype = psrc->param_types[i];
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
pq_sendint(&buf, (int) ptype, 4);
|
|
|
|
}
|
|
|
|
pq_endmessage(&buf);
|
2003-05-06 21:51:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Next send RowDescription or NoData to describe the result...
|
|
|
|
*/
|
2007-03-13 00:33:44 +00:00
|
|
|
if (psrc->resultDesc)
|
|
|
|
{
|
|
|
|
List *tlist;
|
|
|
|
|
2011-09-16 00:42:53 -04:00
|
|
|
/* Get the plan's primary targetlist */
|
2017-03-31 23:17:18 -05:00
|
|
|
tlist = CachedPlanGetTargetList(psrc, NULL);
|
2007-03-13 00:33:44 +00:00
|
|
|
|
|
|
|
SendRowDescriptionMessage(psrc->resultDesc, tlist, NULL);
|
|
|
|
}
|
2003-05-06 21:51:42 +00:00
|
|
|
else
|
|
|
|
pq_putemptymessage('n'); /* NoData */
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* exec_describe_portal_message
|
|
|
|
*
|
|
|
|
* Process a "Describe" message for a portal
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
exec_describe_portal_message(const char *portal_name)
|
|
|
|
{
|
|
|
|
Portal portal;
|
|
|
|
|
2005-12-14 17:06:28 +00:00
|
|
|
/*
|
|
|
|
* Start up a transaction command. (Note that this will normally change
|
|
|
|
* current memory context.) Nothing happens if we are already in one.
|
|
|
|
*/
|
2006-06-20 22:52:00 +00:00
|
|
|
start_xact_command();
|
2005-12-14 17:06:28 +00:00
|
|
|
|
|
|
|
/* Switch back to message context */
|
|
|
|
MemoryContextSwitchTo(MessageContext);
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
portal = GetPortalByName(portal_name);
|
|
|
|
if (!PortalIsValid(portal))
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_CURSOR),
|
|
|
|
errmsg("portal \"%s\" does not exist", portal_name)));
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2005-12-14 17:06:28 +00:00
|
|
|
/*
|
|
|
|
* If we are in aborted transaction state, we can't run
|
|
|
|
* SendRowDescriptionMessage(), because that needs catalog accesses.
|
2014-05-06 12:12:18 -04:00
|
|
|
* Hence, refuse to Describe portals that return data. (We shouldn't just
|
2005-12-14 17:06:28 +00:00
|
|
|
* refuse all Describes, since that might break the ability of some
|
|
|
|
* clients to issue COMMIT or ROLLBACK commands, if they use code that
|
|
|
|
* blindly Describes whatever it does.)
|
|
|
|
*/
|
|
|
|
if (IsAbortedTransactionBlockState() &&
|
|
|
|
portal->tupDesc)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
|
|
|
|
errmsg("current transaction is aborted, "
|
2010-01-16 10:05:59 +00:00
|
|
|
"commands ignored until end of transaction block"),
|
|
|
|
errdetail_abort()));
|
2005-12-14 17:06:28 +00:00
|
|
|
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput != DestRemote)
|
2003-05-05 00:44:56 +00:00
|
|
|
return; /* can't actually do anything... */
|
|
|
|
|
|
|
|
if (portal->tupDesc)
|
2005-06-22 17:45:46 +00:00
|
|
|
SendRowDescriptionMessage(portal->tupDesc,
|
|
|
|
FetchPortalTargetList(portal),
|
2003-05-08 18:16:37 +00:00
|
|
|
portal->formats);
|
2003-05-05 00:44:56 +00:00
|
|
|
else
|
|
|
|
pq_putemptymessage('n'); /* NoData */
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
/*
|
2006-06-20 22:52:00 +00:00
|
|
|
* Convenience routines for starting/committing a single command.
|
2000-10-07 00:58:23 +00:00
|
|
|
*/
|
|
|
|
static void
|
2006-06-20 22:52:00 +00:00
|
|
|
start_xact_command(void)
|
2000-10-07 00:58:23 +00:00
|
|
|
{
|
2006-06-20 22:52:00 +00:00
|
|
|
if (!xact_started)
|
2003-05-05 00:44:56 +00:00
|
|
|
{
|
2006-06-20 22:52:00 +00:00
|
|
|
StartTransactionCommand();
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
/* Set statement timeout running, if any */
|
2006-06-20 22:52:00 +00:00
|
|
|
/* NB: this mustn't be enabled until we are within an xact */
|
2003-05-05 00:44:56 +00:00
|
|
|
if (StatementTimeout > 0)
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-16 18:43:21 -04:00
|
|
|
enable_timeout_after(STATEMENT_TIMEOUT, StatementTimeout);
|
2005-09-19 17:21:49 +00:00
|
|
|
else
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-16 18:43:21 -04:00
|
|
|
disable_timeout(STATEMENT_TIMEOUT, false);
|
2005-10-15 02:49:52 +00:00
|
|
|
|
2006-04-25 00:25:22 +00:00
|
|
|
xact_started = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
finish_xact_command(void)
|
|
|
|
{
|
|
|
|
if (xact_started)
|
|
|
|
{
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Cancel any active statement timeout before committing */
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-16 18:43:21 -04:00
|
|
|
disable_timeout(STATEMENT_TIMEOUT, false);
|
2006-06-20 22:52:00 +00:00
|
|
|
|
2003-05-14 03:26:03 +00:00
|
|
|
CommitTransactionCommand();
|
2000-10-07 04:00:41 +00:00
|
|
|
|
2003-09-14 00:03:32 +00:00
|
|
|
#ifdef MEMORY_CONTEXT_CHECKING
|
|
|
|
/* Check all memory contexts that weren't freed during commit */
|
|
|
|
/* (those that were, were checked before being deleted) */
|
|
|
|
MemoryContextCheck(TopMemoryContext);
|
|
|
|
#endif
|
|
|
|
|
2000-10-07 00:58:23 +00:00
|
|
|
#ifdef SHOW_MEMORY_STATS
|
2003-08-13 16:16:23 +00:00
|
|
|
/* Print mem stats after each commit for leak tracking */
|
2006-12-08 02:15:07 +00:00
|
|
|
MemoryContextStats(TopMemoryContext);
|
2000-10-07 00:58:23 +00:00
|
|
|
#endif
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
xact_started = false;
|
|
|
|
}
|
2000-10-07 00:58:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-11-10 00:31:34 +00:00
|
|
|
/*
|
|
|
|
* Convenience routines for checking whether a statement is one of the
|
|
|
|
* ones that we allow in transaction-aborted state.
|
|
|
|
*/
|
|
|
|
|
2007-02-20 17:32:18 +00:00
|
|
|
/* Test a bare parsetree */
|
2005-11-10 00:31:34 +00:00
|
|
|
static bool
|
|
|
|
IsTransactionExitStmt(Node *parsetree)
|
|
|
|
{
|
|
|
|
if (parsetree && IsA(parsetree, TransactionStmt))
|
|
|
|
{
|
|
|
|
TransactionStmt *stmt = (TransactionStmt *) parsetree;
|
|
|
|
|
|
|
|
if (stmt->kind == TRANS_STMT_COMMIT ||
|
|
|
|
stmt->kind == TRANS_STMT_PREPARE ||
|
|
|
|
stmt->kind == TRANS_STMT_ROLLBACK ||
|
|
|
|
stmt->kind == TRANS_STMT_ROLLBACK_TO)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
/* Test a list that contains PlannedStmt nodes */
|
2005-11-10 00:31:34 +00:00
|
|
|
static bool
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
IsTransactionExitStmtList(List *pstmts)
|
2005-11-10 00:31:34 +00:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
if (list_length(pstmts) == 1)
|
2005-11-10 00:31:34 +00:00
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 13:51:29 -04:00
|
|
|
PlannedStmt *pstmt = linitial_node(PlannedStmt, pstmts);
|
2005-11-10 00:31:34 +00:00
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
if (pstmt->commandType == CMD_UTILITY &&
|
|
|
|
IsTransactionExitStmt(pstmt->utilityStmt))
|
2005-11-10 00:31:34 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
/* Test a list that contains PlannedStmt nodes */
|
2005-11-10 00:31:34 +00:00
|
|
|
static bool
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
IsTransactionStmtList(List *pstmts)
|
2005-11-10 00:31:34 +00:00
|
|
|
{
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
if (list_length(pstmts) == 1)
|
2005-11-10 00:31:34 +00:00
|
|
|
{
|
Improve castNode notation by introducing list-extraction-specific variants.
This extends the castNode() notation introduced by commit 5bcab1114 to
provide, in one step, extraction of a list cell's pointer and coercion to
a concrete node type. For example, "lfirst_node(Foo, lc)" is the same
as "castNode(Foo, lfirst(lc))". Almost half of the uses of castNode
that have appeared so far include a list extraction call, so this is
pretty widely useful, and it saves a few more keystrokes compared to the
old way.
As with the previous patch, back-patch the addition of these macros to
pg_list.h, so that the notation will be available when back-patching.
Patch by me, after an idea of Andrew Gierth's.
Discussion: https://postgr.es/m/14197.1491841216@sss.pgh.pa.us
2017-04-10 13:51:29 -04:00
|
|
|
PlannedStmt *pstmt = linitial_node(PlannedStmt, pstmts);
|
2005-11-10 00:31:34 +00:00
|
|
|
|
Change representation of statement lists, and add statement location info.
This patch makes several changes that improve the consistency of
representation of lists of statements. It's always been the case
that the output of parse analysis is a list of Query nodes, whatever
the types of the individual statements in the list. This patch brings
similar consistency to the outputs of raw parsing and planning steps:
* The output of raw parsing is now always a list of RawStmt nodes;
the statement-type-dependent nodes are one level down from that.
* The output of pg_plan_queries() is now always a list of PlannedStmt
nodes, even for utility statements. In the case of a utility statement,
"planning" just consists of wrapping a CMD_UTILITY PlannedStmt around
the utility node. This list representation is now used in Portal and
CachedPlan plan lists, replacing the former convention of intermixing
PlannedStmts with bare utility-statement nodes.
Now, every list of statements has a consistent head-node type depending
on how far along it is in processing. This allows changing many places
that formerly used generic "Node *" pointers to use a more specific
pointer type, thus reducing the number of IsA() tests and casts needed,
as well as improving code clarity.
Also, the post-parse-analysis representation of DECLARE CURSOR is changed
so that it looks more like EXPLAIN, PREPARE, etc. That is, the contained
SELECT remains a child of the DeclareCursorStmt rather than getting flipped
around to be the other way. It's now true for both Query and PlannedStmt
that utilityStmt is non-null if and only if commandType is CMD_UTILITY.
That allows simplifying a lot of places that were testing both fields.
(I think some of those were just defensive programming, but in many places,
it was actually necessary to avoid confusing DECLARE CURSOR with SELECT.)
Because PlannedStmt carries a canSetTag field, we're also able to get rid
of some ad-hoc rules about how to reconstruct canSetTag for a bare utility
statement; specifically, the assumption that a utility is canSetTag if and
only if it's the only one in its list. While I see no near-term need for
relaxing that restriction, it's nice to get rid of the ad-hocery.
The API of ProcessUtility() is changed so that what it's passed is the
wrapper PlannedStmt not just the bare utility statement. This will affect
all users of ProcessUtility_hook, but the changes are pretty trivial; see
the affected contrib modules for examples of the minimum change needed.
(Most compilers should give pointer-type-mismatch warnings for uncorrected
code.)
There's also a change in the API of ExplainOneQuery_hook, to pass through
cursorOptions instead of expecting hook functions to know what to pick.
This is needed because of the DECLARE CURSOR changes, but really should
have been done in 9.6; it's unlikely that any extant hook functions
know about using CURSOR_OPT_PARALLEL_OK.
Finally, teach gram.y to save statement boundary locations in RawStmt
nodes, and pass those through to Query and PlannedStmt nodes. This allows
more intelligent handling of cases where a source query string contains
multiple statements. This patch doesn't actually do anything with the
information, but a follow-on patch will. (Passing this information through
cleanly is the true motivation for these changes; while I think this is all
good cleanup, it's unlikely we'd have bothered without this end goal.)
catversion bump because addition of location fields to struct Query
affects stored rules.
This patch is by me, but it owes a good deal to Fabien Coelho who did
a lot of preliminary work on the problem, and also reviewed the patch.
Discussion: https://postgr.es/m/alpine.DEB.2.20.1612200926310.29821@lancre
2017-01-14 16:02:35 -05:00
|
|
|
if (pstmt->commandType == CMD_UTILITY &&
|
|
|
|
IsA(pstmt->utilityStmt, TransactionStmt))
|
2005-11-10 00:31:34 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2007-03-13 00:33:44 +00:00
|
|
|
/* Release any existing unnamed prepared statement */
static void
drop_unnamed_stmt(void)
{
	/* paranoia to avoid a dangling pointer in case of error */
	if (unnamed_stmt_psrc)
	{
		CachedPlanSource *psrc = unnamed_stmt_psrc;

		/*
		 * Clear the global reference before dropping the plan, so that if
		 * DropCachedPlan errors out we are not left pointing at freed
		 * storage on the next call.
		 */
		unnamed_stmt_psrc = NULL;
		DropCachedPlan(psrc);
	}
}
|
|
|
|
|
2005-11-10 00:31:34 +00:00
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
/* --------------------------------
|
1997-09-07 05:04:48 +00:00
|
|
|
* signal handler routines used in PostgresMain()
|
1996-07-09 06:22:35 +00:00
|
|
|
* --------------------------------
|
|
|
|
*/
|
|
|
|
|
2001-01-14 05:08:17 +00:00
|
|
|
/*
 * quickdie() occurs when signalled SIGQUIT by the postmaster.
 *
 * Some backend has bought the farm,
 * so we need to stop what we're doing and exit.
 */
void
quickdie(SIGNAL_ARGS)
{
	/* Block further signals first; a nested quickdie() would be fatal */
	sigaddset(&BlockSig, SIGQUIT);	/* prevent nested calls */
	PG_SETMASK(&BlockSig);

	/*
	 * Prevent interrupts while exiting; though we just blocked signals that
	 * would queue new interrupts, one may have been pending.  We don't want a
	 * quickdie() downgraded to a mere query cancel.
	 */
	HOLD_INTERRUPTS();

	/*
	 * If we're aborting out of client auth, don't risk trying to send
	 * anything to the client; we will likely violate the protocol, not to
	 * mention that we may have interrupted the guts of OpenSSL or some
	 * authentication library.
	 */
	if (ClientAuthInProgress && whereToSendOutput == DestRemote)
		whereToSendOutput = DestNone;

	/*
	 * Ideally this should be ereport(FATAL), but then we'd not get control
	 * back...
	 */
	ereport(WARNING,
			(errcode(ERRCODE_CRASH_SHUTDOWN),
			 errmsg("terminating connection because of crash of another server process"),
			 errdetail("The postmaster has commanded this server process to roll back"
					   " the current transaction and exit, because another"
					   " server process exited abnormally and possibly corrupted"
					   " shared memory."),
			 errhint("In a moment you should be able to reconnect to the"
					 " database and repeat your command.")));

	/*
	 * We DO NOT want to run proc_exit() callbacks -- we're here because
	 * shared memory may be corrupted, so we don't want to try to clean up our
	 * transaction.  Just nail the windows shut and get out of town.  Now that
	 * there's an atexit callback to prevent third-party code from breaking
	 * things by calling exit() directly, we have to reset the callbacks
	 * explicitly to make this work as intended.
	 */
	on_exit_reset();

	/*
	 * Note we do exit(2) not exit(0).  This is to force the postmaster into a
	 * system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
	 * backend.  This is necessary precisely because we don't clean up our
	 * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
	 * should ensure the postmaster sees this as a crash, too, but no harm in
	 * being doubly sure.)
	 */
	exit(2);
}
|
|
|
|
|
1999-10-06 21:58:18 +00:00
|
|
|
/*
|
2001-01-14 05:08:17 +00:00
|
|
|
* Shutdown signal from postmaster: abort transaction and exit
|
|
|
|
* at soonest convenient time
|
1999-10-06 21:58:18 +00:00
|
|
|
*/
|
1996-07-09 06:22:35 +00:00
|
|
|
void
|
2000-08-29 09:36:51 +00:00
|
|
|
die(SIGNAL_ARGS)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2001-01-07 04:17:29 +00:00
|
|
|
int save_errno = errno;
|
|
|
|
|
|
|
|
/* Don't joggle the elbow of proc_exit */
|
2001-03-22 04:01:46 +00:00
|
|
|
if (!proc_exit_inprogress)
|
1999-10-06 21:58:18 +00:00
|
|
|
{
|
2001-01-14 05:08:17 +00:00
|
|
|
InterruptPending = true;
|
2000-12-18 00:44:50 +00:00
|
|
|
ProcDiePending = true;
|
1999-10-06 21:58:18 +00:00
|
|
|
}
|
2001-01-14 05:08:17 +00:00
|
|
|
|
2011-08-10 12:20:30 -04:00
|
|
|
/* If we're still here, waken anything waiting on the process latch */
|
2015-01-14 18:45:22 +01:00
|
|
|
SetLatch(MyLatch);
|
2011-08-10 12:20:30 -04:00
|
|
|
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
/*
|
|
|
|
* If we're in single user mode, we want to quit immediately - we can't
|
2015-05-23 21:35:49 -04:00
|
|
|
* rely on latches as they wouldn't work when stdin/stdout is a file.
|
|
|
|
* Rather ugly, but it's unlikely to be worthwhile to invest much more
|
|
|
|
* effort just for the benefit of single user mode.
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
*/
|
|
|
|
if (DoingCommandRead && whereToSendOutput != DestRemote)
|
|
|
|
ProcessInterrupts();
|
|
|
|
|
2001-01-14 05:08:17 +00:00
|
|
|
errno = save_errno;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2001-01-07 04:17:29 +00:00
|
|
|
/*
|
2001-01-14 05:08:17 +00:00
|
|
|
* Query-cancel signal from postmaster: abort current transaction
|
|
|
|
* at soonest convenient time
|
2001-01-07 04:17:29 +00:00
|
|
|
*/
|
2005-07-14 05:13:45 +00:00
|
|
|
void
|
2002-07-13 01:02:14 +00:00
|
|
|
StatementCancelHandler(SIGNAL_ARGS)
|
1998-05-19 18:05:58 +00:00
|
|
|
{
|
2000-12-18 17:33:42 +00:00
|
|
|
int save_errno = errno;
|
|
|
|
|
2001-03-22 04:01:46 +00:00
|
|
|
/*
|
2004-07-31 00:45:57 +00:00
|
|
|
* Don't joggle the elbow of proc_exit
|
2001-03-22 04:01:46 +00:00
|
|
|
*/
|
2004-07-31 00:45:57 +00:00
|
|
|
if (!proc_exit_inprogress)
|
2001-01-07 04:17:29 +00:00
|
|
|
{
|
2001-01-14 05:08:17 +00:00
|
|
|
InterruptPending = true;
|
|
|
|
QueryCancelPending = true;
|
2001-01-07 04:17:29 +00:00
|
|
|
}
|
|
|
|
|
2011-08-10 12:20:30 -04:00
|
|
|
/* If we're still here, waken anything waiting on the process latch */
|
2015-01-14 18:45:22 +01:00
|
|
|
SetLatch(MyLatch);
|
2011-08-10 12:20:30 -04:00
|
|
|
|
2000-12-18 17:33:42 +00:00
|
|
|
errno = save_errno;
|
1998-05-19 18:05:58 +00:00
|
|
|
}
|
|
|
|
|
2001-01-07 04:17:29 +00:00
|
|
|
/* signal handler for floating point exception (SIGFPE) */
void
FloatExceptionHandler(SIGNAL_ARGS)
{
	/* We're not returning, so no need to save errno */
	ereport(ERROR,
			(errcode(ERRCODE_FLOATING_POINT_EXCEPTION),
			 errmsg("floating-point exception"),
			 errdetail("An invalid floating-point operation was signaled. "
					   "This probably means an out-of-range result or an "
					   "invalid operation, such as division by zero.")));
}
|
|
|
|
|
2017-06-05 18:53:41 -07:00
|
|
|
/*
|
|
|
|
* SIGHUP: set flag to re-read config file at next convenient time.
|
|
|
|
*
|
|
|
|
* Sets the ConfigReloadPending flag, which should be checked at convenient
|
|
|
|
* places inside main loops. (Better than doing the reading in the signal
|
|
|
|
* handler, ey?)
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
PostgresSigHupHandler(SIGNAL_ARGS)
|
2000-05-31 00:28:42 +00:00
|
|
|
{
|
2011-08-10 12:20:30 -04:00
|
|
|
int save_errno = errno;
|
|
|
|
|
2017-06-05 18:53:41 -07:00
|
|
|
ConfigReloadPending = true;
|
2015-01-14 18:45:22 +01:00
|
|
|
SetLatch(MyLatch);
|
2011-08-10 12:20:30 -04:00
|
|
|
|
|
|
|
errno = save_errno;
|
2000-05-31 00:28:42 +00:00
|
|
|
}
|
|
|
|
|
2010-01-16 10:05:59 +00:00
|
|
|
/*
 * RecoveryConflictInterrupt: out-of-line portion of recovery conflict
 * handling following receipt of SIGUSR1. Designed to be similar to die()
 * and StatementCancelHandler(). Called only by a normal user backend
 * that begins a transaction during recovery.
 *
 * Depending on the conflict reason, this either ignores the conflict,
 * requests a query cancel (ERROR), or requests session termination
 * (FATAL) by setting the corresponding pending flags; the flags are
 * acted on later by ProcessInterrupts().
 */
void
RecoveryConflictInterrupt(ProcSignalReason reason)
{
	int			save_errno = errno;

	/*
	 * Don't joggle the elbow of proc_exit
	 */
	if (!proc_exit_inprogress)
	{
		RecoveryConflictReason = reason;
		switch (reason)
		{
			case PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK:

				/*
				 * If we aren't waiting for a lock we can never deadlock.
				 */
				if (!IsWaitingForLock())
					return;

				/* Intentional drop through to check wait for pin */

			case PROCSIG_RECOVERY_CONFLICT_BUFFERPIN:

				/*
				 * If we aren't blocking the Startup process there is nothing
				 * more to do.
				 */
				if (!HoldingBufferPinThatDelaysRecovery())
					return;

				MyProc->recoveryConflictPending = true;

				/* Intentional drop through to error handling */

			case PROCSIG_RECOVERY_CONFLICT_LOCK:
			case PROCSIG_RECOVERY_CONFLICT_TABLESPACE:
			case PROCSIG_RECOVERY_CONFLICT_SNAPSHOT:

				/*
				 * If we aren't in a transaction any longer then ignore.
				 */
				if (!IsTransactionOrTransactionBlock())
					return;

				/*
				 * If we can abort just the current subtransaction then we are
				 * OK to throw an ERROR to resolve the conflict. Otherwise
				 * drop through to the FATAL case.
				 *
				 * XXX other times that we can throw just an ERROR *may* be
				 * PROCSIG_RECOVERY_CONFLICT_LOCK if no locks are held in
				 * parent transactions
				 *
				 * PROCSIG_RECOVERY_CONFLICT_SNAPSHOT if no snapshots are held
				 * by parent transactions and the transaction is not
				 * transaction-snapshot mode
				 *
				 * PROCSIG_RECOVERY_CONFLICT_TABLESPACE if no temp files or
				 * cursors open in parent transactions
				 */
				if (!IsSubTransaction())
				{
					/*
					 * If we already aborted then we no longer need to cancel.
					 * We do this here since we do not wish to ignore aborted
					 * subtransactions, which must cause FATAL, currently.
					 */
					if (IsAbortedTransactionBlockState())
						return;

					RecoveryConflictPending = true;
					QueryCancelPending = true;
					InterruptPending = true;
					break;
				}

				/* Intentional drop through to session cancel */

			case PROCSIG_RECOVERY_CONFLICT_DATABASE:
				RecoveryConflictPending = true;
				ProcDiePending = true;
				InterruptPending = true;
				break;

			default:
				elog(FATAL, "unrecognized conflict mode: %d",
					 (int) reason);
		}

		/* Every path that didn't return must have requested cancel or die */
		Assert(RecoveryConflictPending && (QueryCancelPending || ProcDiePending));

		/*
		 * All conflicts apart from database cause dynamic errors where the
		 * command or transaction can be retried at a later point with some
		 * potential for success. No need to reset this, since non-retryable
		 * conflict errors are currently FATAL.
		 */
		if (reason == PROCSIG_RECOVERY_CONFLICT_DATABASE)
			RecoveryConflictRetryable = false;
	}

	/*
	 * Set the process latch. This function essentially emulates signal
	 * handlers like die() and StatementCancelHandler() and it seems prudent
	 * to behave similarly as they do.
	 */
	SetLatch(MyLatch);

	errno = save_errno;
}
|
1998-05-19 18:05:58 +00:00
|
|
|
|
2001-01-14 05:08:17 +00:00
|
|
|
/*
|
|
|
|
* ProcessInterrupts: out-of-line portion of CHECK_FOR_INTERRUPTS() macro
|
|
|
|
*
|
|
|
|
* If an interrupt condition is pending, and it's safe to service it,
|
|
|
|
* then clear the flag and accept the interrupt. Called only when
|
|
|
|
* InterruptPending is true.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ProcessInterrupts(void)
|
|
|
|
{
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
/* OK to accept any interrupts now? */
|
2001-01-19 22:08:47 +00:00
|
|
|
if (InterruptHoldoffCount != 0 || CritSectionCount != 0)
|
2001-01-14 05:08:17 +00:00
|
|
|
return;
|
|
|
|
InterruptPending = false;
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
|
2001-01-14 05:08:17 +00:00
|
|
|
if (ProcDiePending)
|
|
|
|
{
|
|
|
|
ProcDiePending = false;
|
2001-03-22 04:01:46 +00:00
|
|
|
QueryCancelPending = false; /* ProcDie trumps QueryCancel */
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
LockErrorCleanup();
|
2009-08-29 19:26:52 +00:00
|
|
|
/* As in quickdie, don't risk sending to client during auth */
|
|
|
|
if (ClientAuthInProgress && whereToSendOutput == DestRemote)
|
|
|
|
whereToSendOutput = DestNone;
|
2015-02-03 22:54:48 +01:00
|
|
|
if (ClientAuthInProgress)
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_QUERY_CANCELED),
|
|
|
|
errmsg("canceling authentication due to timeout")));
|
|
|
|
else if (IsAutoVacuumWorkerProcess())
|
2007-06-29 17:07:39 +00:00
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_ADMIN_SHUTDOWN),
|
|
|
|
errmsg("terminating autovacuum process due to administrator command")));
|
2017-06-02 14:46:00 -04:00
|
|
|
else if (IsLogicalWorker())
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_ADMIN_SHUTDOWN),
|
|
|
|
errmsg("terminating logical replication worker due to administrator command")));
|
2017-06-08 15:00:53 -07:00
|
|
|
else if (IsLogicalLauncher())
|
|
|
|
{
|
|
|
|
ereport(DEBUG1,
|
|
|
|
(errmsg("logical replication launcher shutting down")));
|
|
|
|
|
|
|
|
/* The logical replication launcher can be stopped at any time. */
|
|
|
|
proc_exit(0);
|
|
|
|
}
|
2010-05-12 19:45:02 +00:00
|
|
|
else if (RecoveryConflictPending && RecoveryConflictRetryable)
|
2011-01-03 12:46:03 +01:00
|
|
|
{
|
|
|
|
pgstat_report_recovery_conflict(RecoveryConflictReason);
|
2010-05-12 19:45:02 +00:00
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
|
|
|
|
errmsg("terminating connection due to conflict with recovery"),
|
|
|
|
errdetail_recovery_conflict()));
|
2011-01-03 12:46:03 +01:00
|
|
|
}
|
2010-01-16 10:05:59 +00:00
|
|
|
else if (RecoveryConflictPending)
|
2011-01-03 12:46:03 +01:00
|
|
|
{
|
2011-02-01 00:20:53 +00:00
|
|
|
/* Currently there is only one non-retryable recovery conflict */
|
|
|
|
Assert(RecoveryConflictReason == PROCSIG_RECOVERY_CONFLICT_DATABASE);
|
2011-01-03 12:46:03 +01:00
|
|
|
pgstat_report_recovery_conflict(RecoveryConflictReason);
|
2010-01-16 10:05:59 +00:00
|
|
|
ereport(FATAL,
|
2011-02-01 08:44:01 +00:00
|
|
|
(errcode(ERRCODE_DATABASE_DROPPED),
|
2010-02-26 02:01:40 +00:00
|
|
|
errmsg("terminating connection due to conflict with recovery"),
|
2010-01-23 17:04:05 +00:00
|
|
|
errdetail_recovery_conflict()));
|
2011-01-03 12:46:03 +01:00
|
|
|
}
|
2007-06-29 17:07:39 +00:00
|
|
|
else
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_ADMIN_SHUTDOWN),
|
2007-11-15 21:14:46 +00:00
|
|
|
errmsg("terminating connection due to administrator command")));
|
2001-01-14 05:08:17 +00:00
|
|
|
}
|
2011-12-09 11:37:21 +02:00
|
|
|
if (ClientConnectionLost)
|
|
|
|
{
|
|
|
|
QueryCancelPending = false; /* lost connection trumps QueryCancel */
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
LockErrorCleanup();
|
2011-12-09 11:37:21 +02:00
|
|
|
/* don't send to client, we already know the connection to be dead. */
|
|
|
|
whereToSendOutput = DestNone;
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_CONNECTION_FAILURE),
|
|
|
|
errmsg("connection to client lost")));
|
|
|
|
}
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If a recovery conflict happens while we are waiting for input from the
|
|
|
|
* client, the client is presumably just sitting idle in a transaction,
|
|
|
|
* preventing recovery from making progress. Terminate the connection to
|
|
|
|
* dislodge it.
|
|
|
|
*/
|
|
|
|
if (RecoveryConflictPending && DoingCommandRead)
|
|
|
|
{
|
2015-05-23 21:35:49 -04:00
|
|
|
QueryCancelPending = false; /* this trumps QueryCancel */
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
RecoveryConflictPending = false;
|
|
|
|
LockErrorCleanup();
|
|
|
|
pgstat_report_recovery_conflict(RecoveryConflictReason);
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
|
2015-05-23 21:35:49 -04:00
|
|
|
errmsg("terminating connection due to conflict with recovery"),
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
errdetail_recovery_conflict(),
|
|
|
|
errhint("In a moment you should be able to reconnect to the"
|
|
|
|
" database and repeat your command.")));
|
|
|
|
}
|
|
|
|
|
2001-01-14 05:08:17 +00:00
|
|
|
if (QueryCancelPending)
|
|
|
|
{
|
Be more predictable about reporting "lock timeout" vs "statement timeout".
If both timeout indicators are set when we arrive at ProcessInterrupts,
we've historically just reported "lock timeout". However, some buildfarm
members have been observed to fail isolationtester's timeouts test by
reporting "lock timeout" when the statement timeout was expected to fire
first. The cause seems to be that the process is allowed to sleep longer
than expected (probably due to heavy machine load) so that the lock
timeout happens before we reach the point of reporting the error, and
then this arbitrary tiebreak rule does the wrong thing. We can improve
matters by comparing the scheduled timeout times to decide which error
to report.
I had originally proposed greatly reducing the 1-second window between
the two timeouts in the test cases. On reflection that is a bad idea,
at least for the case where the lock timeout is expected to fire first,
because that would assume that it takes negligible time to get from
statement start to the beginning of the lock wait. Thus, this patch
doesn't completely remove the risk of test failures on slow machines.
Empirically, however, the case this handles is the one we are seeing
in the buildfarm. The explanation may be that the other case requires
the scheduler to take the CPU away from a busy process, whereas the
case fixed here only requires the scheduler to not give the CPU back
right away to a process that has been woken from a multi-second sleep
(and, perhaps, has been swapped out meanwhile).
Back-patch to 9.3 where the isolationtester timeouts test was added.
Discussion: <8693.1464314819@sss.pgh.pa.us>
2016-05-27 10:40:20 -04:00
|
|
|
bool lock_timeout_occurred;
|
|
|
|
bool stmt_timeout_occurred;
|
|
|
|
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
/*
|
|
|
|
* Don't allow query cancel interrupts while reading input from the
|
|
|
|
* client, because we might lose sync in the FE/BE protocol. (Die
|
|
|
|
* interrupts are OK, because we won't read any further messages from
|
|
|
|
* the client in that case.)
|
|
|
|
*/
|
|
|
|
if (QueryCancelHoldoffCount != 0)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Re-arm InterruptPending so that we process the cancel request
|
|
|
|
* as soon as we're done reading the message.
|
|
|
|
*/
|
|
|
|
InterruptPending = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2001-01-14 05:08:17 +00:00
|
|
|
QueryCancelPending = false;
|
2013-03-16 23:22:17 -04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If LOCK_TIMEOUT and STATEMENT_TIMEOUT indicators are both set, we
|
Be more predictable about reporting "lock timeout" vs "statement timeout".
If both timeout indicators are set when we arrive at ProcessInterrupts,
we've historically just reported "lock timeout". However, some buildfarm
members have been observed to fail isolationtester's timeouts test by
reporting "lock timeout" when the statement timeout was expected to fire
first. The cause seems to be that the process is allowed to sleep longer
than expected (probably due to heavy machine load) so that the lock
timeout happens before we reach the point of reporting the error, and
then this arbitrary tiebreak rule does the wrong thing. We can improve
matters by comparing the scheduled timeout times to decide which error
to report.
I had originally proposed greatly reducing the 1-second window between
the two timeouts in the test cases. On reflection that is a bad idea,
at least for the case where the lock timeout is expected to fire first,
because that would assume that it takes negligible time to get from
statement start to the beginning of the lock wait. Thus, this patch
doesn't completely remove the risk of test failures on slow machines.
Empirically, however, the case this handles is the one we are seeing
in the buildfarm. The explanation may be that the other case requires
the scheduler to take the CPU away from a busy process, whereas the
case fixed here only requires the scheduler to not give the CPU back
right away to a process that has been woken from a multi-second sleep
(and, perhaps, has been swapped out meanwhile).
Back-patch to 9.3 where the isolationtester timeouts test was added.
Discussion: <8693.1464314819@sss.pgh.pa.us>
2016-05-27 10:40:20 -04:00
|
|
|
* need to clear both, so always fetch both.
|
2013-03-16 23:22:17 -04:00
|
|
|
*/
|
Be more predictable about reporting "lock timeout" vs "statement timeout".
If both timeout indicators are set when we arrive at ProcessInterrupts,
we've historically just reported "lock timeout". However, some buildfarm
members have been observed to fail isolationtester's timeouts test by
reporting "lock timeout" when the statement timeout was expected to fire
first. The cause seems to be that the process is allowed to sleep longer
than expected (probably due to heavy machine load) so that the lock
timeout happens before we reach the point of reporting the error, and
then this arbitrary tiebreak rule does the wrong thing. We can improve
matters by comparing the scheduled timeout times to decide which error
to report.
I had originally proposed greatly reducing the 1-second window between
the two timeouts in the test cases. On reflection that is a bad idea,
at least for the case where the lock timeout is expected to fire first,
because that would assume that it takes negligible time to get from
statement start to the beginning of the lock wait. Thus, this patch
doesn't completely remove the risk of test failures on slow machines.
Empirically, however, the case this handles is the one we are seeing
in the buildfarm. The explanation may be that the other case requires
the scheduler to take the CPU away from a busy process, whereas the
case fixed here only requires the scheduler to not give the CPU back
right away to a process that has been woken from a multi-second sleep
(and, perhaps, has been swapped out meanwhile).
Back-patch to 9.3 where the isolationtester timeouts test was added.
Discussion: <8693.1464314819@sss.pgh.pa.us>
2016-05-27 10:40:20 -04:00
|
|
|
lock_timeout_occurred = get_timeout_indicator(LOCK_TIMEOUT, true);
|
|
|
|
stmt_timeout_occurred = get_timeout_indicator(STATEMENT_TIMEOUT, true);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If both were set, we want to report whichever timeout completed
|
|
|
|
* earlier; this ensures consistent behavior if the machine is slow
|
|
|
|
* enough that the second timeout triggers before we get here. A tie
|
|
|
|
* is arbitrarily broken in favor of reporting a lock timeout.
|
|
|
|
*/
|
|
|
|
if (lock_timeout_occurred && stmt_timeout_occurred &&
|
|
|
|
get_timeout_finish_time(STATEMENT_TIMEOUT) < get_timeout_finish_time(LOCK_TIMEOUT))
|
|
|
|
lock_timeout_occurred = false; /* report stmt timeout */
|
|
|
|
|
|
|
|
if (lock_timeout_occurred)
|
2013-03-16 23:22:17 -04:00
|
|
|
{
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
LockErrorCleanup();
|
2013-03-16 23:22:17 -04:00
|
|
|
ereport(ERROR,
|
2013-06-29 00:57:25 +01:00
|
|
|
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
|
2013-03-16 23:22:17 -04:00
|
|
|
errmsg("canceling statement due to lock timeout")));
|
|
|
|
}
|
Be more predictable about reporting "lock timeout" vs "statement timeout".
If both timeout indicators are set when we arrive at ProcessInterrupts,
we've historically just reported "lock timeout". However, some buildfarm
members have been observed to fail isolationtester's timeouts test by
reporting "lock timeout" when the statement timeout was expected to fire
first. The cause seems to be that the process is allowed to sleep longer
than expected (probably due to heavy machine load) so that the lock
timeout happens before we reach the point of reporting the error, and
then this arbitrary tiebreak rule does the wrong thing. We can improve
matters by comparing the scheduled timeout times to decide which error
to report.
I had originally proposed greatly reducing the 1-second window between
the two timeouts in the test cases. On reflection that is a bad idea,
at least for the case where the lock timeout is expected to fire first,
because that would assume that it takes negligible time to get from
statement start to the beginning of the lock wait. Thus, this patch
doesn't completely remove the risk of test failures on slow machines.
Empirically, however, the case this handles is the one we are seeing
in the buildfarm. The explanation may be that the other case requires
the scheduler to take the CPU away from a busy process, whereas the
case fixed here only requires the scheduler to not give the CPU back
right away to a process that has been woken from a multi-second sleep
(and, perhaps, has been swapped out meanwhile).
Back-patch to 9.3 where the isolationtester timeouts test was added.
Discussion: <8693.1464314819@sss.pgh.pa.us>
2016-05-27 10:40:20 -04:00
|
|
|
if (stmt_timeout_occurred)
|
2010-01-07 16:29:58 +00:00
|
|
|
{
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
LockErrorCleanup();
|
2005-09-19 17:21:49 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_QUERY_CANCELED),
|
|
|
|
errmsg("canceling statement due to statement timeout")));
|
2010-01-07 16:29:58 +00:00
|
|
|
}
|
|
|
|
if (IsAutoVacuumWorkerProcess())
|
|
|
|
{
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
LockErrorCleanup();
|
2007-12-06 14:32:54 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_QUERY_CANCELED),
|
|
|
|
errmsg("canceling autovacuum task")));
|
2010-01-07 16:29:58 +00:00
|
|
|
}
|
2010-01-16 10:05:59 +00:00
|
|
|
if (RecoveryConflictPending)
|
Allow read only connections during recovery, known as Hot Standby.
Enabled by recovery_connections = on (default) and forcing archive recovery using a recovery.conf. Recovery processing now emulates the original transactions as they are replayed, providing full locking and MVCC behaviour for read only queries. Recovery must enter consistent state before connections are allowed, so there is a delay, typically short, before connections succeed. Replay of recovering transactions can conflict and in some cases deadlock with queries during recovery; these result in query cancellation after max_standby_delay seconds have expired. Infrastructure changes have minor effects on normal running, though introduce four new types of WAL record.
New test mode "make standbycheck" allows regression tests of static command behaviour on a standby server while in recovery. Typical and extreme dynamic behaviours have been checked via code inspection and manual testing. Few port specific behaviours have been utilised, though primary testing has been on Linux only so far.
This commit is the basic patch. Additional changes will follow in this release to enhance some aspects of behaviour, notably improved handling of conflicts, deadlock detection and query cancellation. Changes to VACUUM FULL are also required.
Simon Riggs, with significant and lengthy review by Heikki Linnakangas, including streamlined redesign of snapshot creation and two-phase commit.
Important contributions from Florian Pflug, Mark Kirkwood, Merlin Moncure, Greg Stark, Gianni Ciolli, Gabriele Bartolini, Hannu Krosing, Robert Haas, Tatsuo Ishii, Hiroyuki Yamada plus support and feedback from many other community members.
2009-12-19 01:32:45 +00:00
|
|
|
{
|
2010-01-21 09:30:36 +00:00
|
|
|
RecoveryConflictPending = false;
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
LockErrorCleanup();
|
2011-01-03 12:46:03 +01:00
|
|
|
pgstat_report_recovery_conflict(RecoveryConflictReason);
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
|
2010-02-26 02:01:40 +00:00
|
|
|
errmsg("canceling statement due to conflict with recovery"),
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
errdetail_recovery_conflict()));
|
2010-01-07 16:29:58 +00:00
|
|
|
}
|
Allow read only connections during recovery, known as Hot Standby.
Enabled by recovery_connections = on (default) and forcing archive recovery using a recovery.conf. Recovery processing now emulates the original transactions as they are replayed, providing full locking and MVCC behaviour for read only queries. Recovery must enter consistent state before connections are allowed, so there is a delay, typically short, before connections succeed. Replay of recovering transactions can conflict and in some cases deadlock with queries during recovery; these result in query cancellation after max_standby_delay seconds have expired. Infrastructure changes have minor effects on normal running, though introduce four new types of WAL record.
New test mode "make standbycheck" allows regression tests of static command behaviour on a standby server while in recovery. Typical and extreme dynamic behaviours have been checked via code inspection and manual testing. Few port specific behaviours have been utilised, though primary testing has been on Linux only so far.
This commit is the basic patch. Additional changes will follow in this release to enhance some aspects of behaviour, notably improved handling of conflicts, deadlock detection and query cancellation. Changes to VACUUM FULL are also required.
Simon Riggs, with significant and lengthy review by Heikki Linnakangas, including streamlined redesign of snapshot creation and two-phase commit.
Important contributions from Florian Pflug, Mark Kirkwood, Merlin Moncure, Greg Stark, Gianni Ciolli, Gabriele Bartolini, Hannu Krosing, Robert Haas, Tatsuo Ishii, Hiroyuki Yamada plus support and feedback from many other community members.
2009-12-19 01:32:45 +00:00
|
|
|
|
2010-01-07 16:29:58 +00:00
|
|
|
/*
|
2010-02-26 02:01:40 +00:00
|
|
|
* If we are reading a command from the client, just ignore the cancel
|
|
|
|
* request --- sending an extra error message won't accomplish
|
|
|
|
* anything. Otherwise, go ahead and throw the error.
|
2010-01-07 16:29:58 +00:00
|
|
|
*/
|
|
|
|
if (!DoingCommandRead)
|
|
|
|
{
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
LockErrorCleanup();
|
2005-09-19 17:21:49 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_QUERY_CANCELED),
|
|
|
|
errmsg("canceling statement due to user request")));
|
Allow read only connections during recovery, known as Hot Standby.
Enabled by recovery_connections = on (default) and forcing archive recovery using a recovery.conf. Recovery processing now emulates the original transactions as they are replayed, providing full locking and MVCC behaviour for read only queries. Recovery must enter consistent state before connections are allowed, so there is a delay, typically short, before connections succeed. Replay of recovering transactions can conflict and in some cases deadlock with queries during recovery; these result in query cancellation after max_standby_delay seconds have expired. Infrastructure changes have minor effects on normal running, though introduce four new types of WAL record.
New test mode "make standbycheck" allows regression tests of static command behaviour on a standby server while in recovery. Typical and extreme dynamic behaviours have been checked via code inspection and manual testing. Few port specific behaviours have been utilised, though primary testing has been on Linux only so far.
This commit is the basic patch. Additional changes will follow in this release to enhance some aspects of behaviour, notably improved handling of conflicts, deadlock detection and query cancellation. Changes to VACUUM FULL are also required.
Simon Riggs, with significant and lengthy review by Heikki Linnakangas, including streamlined redesign of snapshot creation and two-phase commit.
Important contributions from Florian Pflug, Mark Kirkwood, Merlin Moncure, Greg Stark, Gianni Ciolli, Gabriele Bartolini, Hannu Krosing, Robert Haas, Tatsuo Ishii, Hiroyuki Yamada plus support and feedback from many other community members.
2009-12-19 01:32:45 +00:00
|
|
|
}
|
2001-01-14 05:08:17 +00:00
|
|
|
}
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often that before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
|
2016-03-16 11:30:45 -04:00
|
|
|
if (IdleInTransactionSessionTimeoutPending)
|
|
|
|
{
|
|
|
|
/* Has the timeout setting changed since last we looked? */
|
|
|
|
if (IdleInTransactionSessionTimeout > 0)
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_IDLE_IN_TRANSACTION_SESSION_TIMEOUT),
|
|
|
|
errmsg("terminating connection due to idle-in-transaction timeout")));
|
|
|
|
else
|
|
|
|
IdleInTransactionSessionTimeoutPending = false;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
Create an infrastructure for parallel computation in PostgreSQL.
This does four basic things. First, it provides convenience routines
to coordinate the startup and shutdown of parallel workers. Second,
it synchronizes various pieces of state (e.g. GUCs, combo CID
mappings, transaction snapshot) from the parallel group leader to the
worker processes. Third, it prohibits various operations that would
result in unsafe changes to that state while parallelism is active.
Finally, it propagates events that would result in an ErrorResponse,
NoticeResponse, or NotifyResponse message being sent to the client
from the parallel workers back to the master, from which they can then
be sent on to the client.
Robert Haas, Amit Kapila, Noah Misch, Rushabh Lathia, Jeevan Chalke.
Suggestions and review from Andres Freund, Heikki Linnakangas, Noah
Misch, Simon Riggs, Euler Taveira, and Jim Nasby.
2015-04-30 15:02:14 -04:00
|
|
|
if (ParallelMessagePending)
|
|
|
|
HandleParallelMessages();
|
2001-01-14 05:08:17 +00:00
|
|
|
}
|
|
|
|
|
2000-11-14 18:11:32 +00:00
|
|
|
|
2010-11-06 19:36:29 -04:00
|
|
|
/*
 * IA64-specific code to fetch the AR.BSP register for stack depth checks.
 *
 * The "backing store pointer" (AR.BSP) tracks the top of IA64's separate
 * register stack, which must be depth-checked independently of the ordinary
 * memory stack (see stack_is_too_deep).
 *
 * We currently support gcc, icc, and HP-UX's native compiler here.
 *
 * Note: while icc accepts gcc asm blocks on x86[_64], this is not true on
 * ia64 (at least not in icc versions before 12.x).  So we have to carry a
 * separate implementation for it.
 */
#if defined(__ia64__) || defined(__ia64)

#if defined(__hpux) && !defined(__GNUC__) && !defined(__INTEL_COMPILER)
/* Assume it's HP-UX native compiler: use its intrinsic to read AR.BSP */
#include <ia64/sys/inline.h>
#define ia64_get_bsp() ((char *) (_Asm_mov_from_ar(_AREG_BSP, _NO_FENCE)))
#elif defined(__INTEL_COMPILER)
/* icc: __getReg intrinsic replaces the gcc-style asm block */
#include <asm/ia64regs.h>
#define ia64_get_bsp() ((char *) __getReg(_IA64_REG_AR_BSP))
#else
/* gcc: read AR.BSP with an inline asm block */
static __inline__ char *
ia64_get_bsp(void)
{
	char	   *ret;

	/* the ;; is a "stop", seems to be required before fetching BSP */
	__asm__ __volatile__(
		";;\n"
		"	mov	%0=ar.bsp	\n"
:		"=r"(ret));

	return ret;
}
#endif
#endif   /* IA64 */
|
2010-11-06 19:36:29 -04:00
|
|
|
|
|
|
|
|
Do stack-depth checking in all postmaster children.
We used to only initialize the stack base pointer when starting up a regular
backend, not in other processes. In particular, autovacuum workers can run
arbitrary user code, and without stack-depth checking, infinite recursion
in e.g an index expression will bring down the whole cluster.
The comment about PL/Java using set_stack_base() is not yet true. As the
code stands, PL/java still modifies the stack_base_ptr variable directly.
However, it's been discussed in the PL/Java mailing list that it should be
changed to use the function, because PL/Java is currently oblivious to the
register stack used on Itanium. There's another issues with PL/Java, namely
that the stack base pointer it sets is not really the base of the stack, it
could be something close to the bottom of the stack. That's a separate issue
that might need some further changes to this code, but that's a different
story.
Backpatch to all supported releases.
2012-04-08 18:28:12 +03:00
|
|
|
/*
 * set_stack_base: set up reference point for stack depth checking
 *
 * The address of a local variable in this frame is taken as the stack base;
 * later calls to check_stack_depth()/stack_is_too_deep() measure recursion
 * depth as the distance from this point.
 *
 * Returns the old reference point, if any.
 */
pg_stack_base_t
set_stack_base(void)
{
	char		stack_base;
	pg_stack_base_t old;

#if defined(__ia64__) || defined(__ia64)
	/* On IA64 the reference point covers both the memory and register stacks */
	old.stack_base_ptr = stack_base_ptr;
	old.register_stack_base_ptr = register_stack_base_ptr;
#else
	old = stack_base_ptr;
#endif

	/* Set up reference point for stack depth checking */
	stack_base_ptr = &stack_base;
#if defined(__ia64__) || defined(__ia64)
	register_stack_base_ptr = ia64_get_bsp();
#endif

	return old;
}
|
|
|
|
|
|
|
|
/*
 * restore_stack_base: restore reference point for stack depth checking
 *
 * This can be used after set_stack_base() to restore the old value.  This
 * is currently only used in PL/Java.  When PL/Java calls a backend function
 * from different thread, the thread's stack is at a different location than
 * the main thread's stack, so it sets the base pointer before the call, and
 * restores it afterwards.
 */
void
restore_stack_base(pg_stack_base_t base)
{
#if defined(__ia64__) || defined(__ia64)
	/* Restore both the memory-stack and register-stack reference points */
	stack_base_ptr = base.stack_base_ptr;
	register_stack_base_ptr = base.register_stack_base_ptr;
#else
	stack_base_ptr = base;
#endif
}
|
|
|
|
|
2004-03-24 22:40:29 +00:00
|
|
|
/*
|
2015-10-02 14:51:58 -04:00
|
|
|
* check_stack_depth/stack_is_too_deep: check for excessively deep recursion
|
2004-03-24 22:40:29 +00:00
|
|
|
*
|
|
|
|
* This should be called someplace in any recursive routine that might possibly
|
|
|
|
* recurse deep enough to overflow the stack. Most Unixen treat stack
|
|
|
|
* overflow as an unrecoverable SIGSEGV, so we want to error out ourselves
|
2006-10-07 19:25:29 +00:00
|
|
|
* before hitting the hardware limit.
|
2015-10-02 14:51:58 -04:00
|
|
|
*
|
|
|
|
* check_stack_depth() just throws an error summarily. stack_is_too_deep()
|
|
|
|
* can be used by code that wants to handle the error condition itself.
|
2004-03-24 22:40:29 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
check_stack_depth(void)
|
2015-10-02 14:51:58 -04:00
|
|
|
{
|
|
|
|
if (stack_is_too_deep())
|
|
|
|
{
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
|
|
|
|
errmsg("stack depth limit exceeded"),
|
|
|
|
errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), "
|
|
|
|
"after ensuring the platform's stack depth limit is adequate.",
|
|
|
|
max_stack_depth)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
stack_is_too_deep(void)
{
	char		stack_top_loc;
	long		stack_depth;

	/*
	 * Compute distance from reference point to my local variables.  The
	 * address of stack_top_loc approximates the current top of stack.
	 */
	stack_depth = (long) (stack_base_ptr - &stack_top_loc);

	/*
	 * Take abs value, since stacks grow up on some machines, down on others
	 */
	if (stack_depth < 0)
		stack_depth = -stack_depth;

	/*
	 * Trouble?
	 *
	 * The test on stack_base_ptr prevents us from erroring out if called
	 * during process setup or in a non-backend process.  Logically it should
	 * be done first, but putting it here avoids wasting cycles during normal
	 * cases.
	 */
	if (stack_depth > max_stack_depth_bytes &&
		stack_base_ptr != NULL)
		return true;

	/*
	 * On IA64 there is a separate "register" stack that requires its own
	 * independent check.  For this, we have to measure the change in the
	 * "BSP" pointer from PostgresMain to here.  Logic is just as above,
	 * except that we know IA64's register stack grows up.
	 *
	 * Note we assume that the same max_stack_depth applies to both stacks.
	 */
#if defined(__ia64__) || defined(__ia64)
	stack_depth = (long) (ia64_get_bsp() - register_stack_base_ptr);

	if (stack_depth > max_stack_depth_bytes &&
		register_stack_base_ptr != NULL)
		return true;
#endif   /* IA64 */

	return false;
}
|
|
|
|
|
2011-04-07 00:11:01 -04:00
|
|
|
/* GUC check hook for max_stack_depth */
|
2004-03-24 22:40:29 +00:00
|
|
|
bool
|
2011-04-07 00:11:01 -04:00
|
|
|
check_max_stack_depth(int *newval, void **extra, GucSource source)
|
2004-03-24 22:40:29 +00:00
|
|
|
{
|
2011-04-07 00:11:01 -04:00
|
|
|
long newval_bytes = *newval * 1024L;
|
2006-10-07 19:25:29 +00:00
|
|
|
long stack_rlimit = get_stack_depth_rlimit();
|
|
|
|
|
|
|
|
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
|
|
|
|
{
|
2011-04-07 00:11:01 -04:00
|
|
|
GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.",
|
|
|
|
(stack_rlimit - STACK_DEPTH_SLOP) / 1024L);
|
|
|
|
GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent.");
|
2006-10-07 19:25:29 +00:00
|
|
|
return false;
|
|
|
|
}
|
2004-03-24 22:40:29 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2011-04-07 00:11:01 -04:00
|
|
|
/* GUC assign hook for max_stack_depth */
|
|
|
|
void
|
|
|
|
assign_max_stack_depth(int newval, void *extra)
|
|
|
|
{
|
|
|
|
long newval_bytes = newval * 1024L;
|
|
|
|
|
|
|
|
max_stack_depth_bytes = newval_bytes;
|
|
|
|
}
|
|
|
|
|
2004-03-24 22:40:29 +00:00
|
|
|
|
2004-11-14 19:35:35 +00:00
|
|
|
/*
|
|
|
|
* set_debug_options --- apply "-d N" command line option
|
|
|
|
*
|
|
|
|
* -d is not quite the same as setting log_min_messages because it enables
|
|
|
|
* other output options.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
set_debug_options(int debug_flag, GucContext context, GucSource source)
|
|
|
|
{
|
|
|
|
if (debug_flag > 0)
|
|
|
|
{
|
|
|
|
char debugstr[64];
|
|
|
|
|
|
|
|
sprintf(debugstr, "debug%d", debug_flag);
|
|
|
|
SetConfigOption("log_min_messages", debugstr, context, source);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
SetConfigOption("log_min_messages", "notice", context, source);
|
|
|
|
|
|
|
|
if (debug_flag >= 1 && context == PGC_POSTMASTER)
|
|
|
|
{
|
|
|
|
SetConfigOption("log_connections", "true", context, source);
|
|
|
|
SetConfigOption("log_disconnections", "true", context, source);
|
|
|
|
}
|
|
|
|
if (debug_flag >= 2)
|
|
|
|
SetConfigOption("log_statement", "all", context, source);
|
|
|
|
if (debug_flag >= 3)
|
|
|
|
SetConfigOption("debug_print_parse", "true", context, source);
|
|
|
|
if (debug_flag >= 4)
|
|
|
|
SetConfigOption("debug_print_plan", "true", context, source);
|
|
|
|
if (debug_flag >= 5)
|
|
|
|
SetConfigOption("debug_print_rewritten", "true", context, source);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
bool
|
|
|
|
set_plan_disabling_options(const char *arg, GucContext context, GucSource source)
|
|
|
|
{
|
2011-10-07 20:13:02 -04:00
|
|
|
const char *tmp = NULL;
|
2006-01-05 10:07:46 +00:00
|
|
|
|
|
|
|
switch (arg[0])
|
|
|
|
{
|
2006-10-04 00:30:14 +00:00
|
|
|
case 's': /* seqscan */
|
2006-01-05 10:07:46 +00:00
|
|
|
tmp = "enable_seqscan";
|
|
|
|
break;
|
2006-10-04 00:30:14 +00:00
|
|
|
case 'i': /* indexscan */
|
2006-01-05 10:07:46 +00:00
|
|
|
tmp = "enable_indexscan";
|
|
|
|
break;
|
2011-10-07 20:13:02 -04:00
|
|
|
case 'o': /* indexonlyscan */
|
|
|
|
tmp = "enable_indexonlyscan";
|
|
|
|
break;
|
2006-10-04 00:30:14 +00:00
|
|
|
case 'b': /* bitmapscan */
|
2006-01-05 10:07:46 +00:00
|
|
|
tmp = "enable_bitmapscan";
|
|
|
|
break;
|
2006-10-04 00:30:14 +00:00
|
|
|
case 't': /* tidscan */
|
2006-01-05 10:07:46 +00:00
|
|
|
tmp = "enable_tidscan";
|
|
|
|
break;
|
2006-10-04 00:30:14 +00:00
|
|
|
case 'n': /* nestloop */
|
2006-01-05 10:07:46 +00:00
|
|
|
tmp = "enable_nestloop";
|
|
|
|
break;
|
2006-10-04 00:30:14 +00:00
|
|
|
case 'm': /* mergejoin */
|
2006-01-05 10:07:46 +00:00
|
|
|
tmp = "enable_mergejoin";
|
|
|
|
break;
|
2006-10-04 00:30:14 +00:00
|
|
|
case 'h': /* hashjoin */
|
2006-01-05 10:07:46 +00:00
|
|
|
tmp = "enable_hashjoin";
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (tmp)
|
|
|
|
{
|
|
|
|
SetConfigOption(tmp, "false", context, source);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * get_stats_option_name --- map a "-t xxx" command line option value onto
 * the corresponding log_*_stats GUC name.
 *
 * arg is the option value ("parser", "planner", or "executor"); only enough
 * leading characters to disambiguate are examined.  Returns the GUC name,
 * or NULL if the value is not recognized.
 *
 * Bug fix: the original code inspected the getopt global 'optarg' instead of
 * the 'arg' parameter, so the function gave wrong answers whenever called
 * with anything other than the current optarg.
 */
const char *
get_stats_option_name(const char *arg)
{
	switch (arg[0])
	{
		case 'p':
			if (arg[1] == 'a')	/* "parser" */
				return "log_parser_stats";
			else if (arg[1] == 'l')		/* "planner" */
				return "log_planner_stats";
			break;

		case 'e':				/* "executor" */
			return "log_executor_stats";
			break;
	}

	return NULL;
}
|
|
|
|
|
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
/* ----------------------------------------------------------------
|
2009-08-29 19:26:52 +00:00
|
|
|
* process_postgres_switches
|
|
|
|
* Parse command line arguments for PostgresMain
|
1999-05-22 17:47:54 +00:00
|
|
|
*
|
2009-08-29 19:26:52 +00:00
|
|
|
* This is called twice, once for the "secure" options coming from the
|
|
|
|
* postmaster or command line, and once for the "insecure" options coming
|
|
|
|
* from the client's startup packet. The latter have the same syntax but
|
|
|
|
* may be restricted in what they can do.
|
|
|
|
*
|
2009-09-01 00:09:42 +00:00
|
|
|
* argv[0] is ignored in either case (it's assumed to be the program name).
|
2009-08-29 19:26:52 +00:00
|
|
|
*
|
|
|
|
* ctx is PGC_POSTMASTER for secure options, PGC_BACKEND for insecure options
|
2014-09-13 21:01:49 -04:00
|
|
|
* coming from the client, or PGC_SU_BACKEND for insecure options coming from
|
2009-08-29 19:26:52 +00:00
|
|
|
* a superuser client.
|
|
|
|
*
|
2013-04-01 14:00:51 -04:00
|
|
|
* If a database name is present in the command line arguments, it's
|
|
|
|
* returned into *dbname (this is allowed only if *dbname is initially NULL).
|
1996-07-09 06:22:35 +00:00
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
2013-04-01 14:00:51 -04:00
|
|
|
void
|
|
|
|
process_postgres_switches(int argc, char *argv[], GucContext ctx,
|
|
|
|
const char **dbname)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2009-08-29 19:26:52 +00:00
|
|
|
bool secure = (ctx == PGC_POSTMASTER);
|
1998-09-01 04:40:42 +00:00
|
|
|
int errs = 0;
|
2002-02-23 01:31:37 +00:00
|
|
|
GucSource gucsource;
|
2009-08-29 19:26:52 +00:00
|
|
|
int flag;
|
2004-03-24 22:40:29 +00:00
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
if (secure)
|
2004-05-28 05:13:32 +00:00
|
|
|
{
|
2010-02-26 02:01:40 +00:00
|
|
|
gucsource = PGC_S_ARGV; /* switches came from command line */
|
1999-05-22 17:47:54 +00:00
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
/* Ignore the initial --single argument, if present */
|
|
|
|
if (argc > 1 && strcmp(argv[1], "--single") == 0)
|
|
|
|
{
|
|
|
|
argv++;
|
|
|
|
argc--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
2006-06-18 15:38:37 +00:00
|
|
|
{
|
2009-08-29 19:26:52 +00:00
|
|
|
gucsource = PGC_S_CLIENT; /* switches came from client */
|
2006-06-18 15:38:37 +00:00
|
|
|
}
|
|
|
|
|
2012-03-11 01:52:05 +02:00
|
|
|
#ifdef HAVE_INT_OPTERR
|
2012-06-10 15:20:04 -04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Turn this off because it's either printed to stderr and not the log
|
|
|
|
* where we'd want it, or argv[0] is now "--single", which would make for
|
|
|
|
* a weird error message. We print our own error message below.
|
|
|
|
*/
|
2012-03-11 01:52:05 +02:00
|
|
|
opterr = 0;
|
|
|
|
#endif
|
|
|
|
|
2007-01-04 00:57:51 +00:00
|
|
|
/*
|
2014-05-06 12:12:18 -04:00
|
|
|
* Parse command-line options. CAUTION: keep this in sync with
|
2007-11-15 21:14:46 +00:00
|
|
|
* postmaster/postmaster.c (the option sets should not conflict) and with
|
|
|
|
* the common help() function in main/main.c.
|
2007-01-04 00:57:51 +00:00
|
|
|
*/
|
2014-06-20 11:06:42 +02:00
|
|
|
while ((flag = getopt(argc, argv, "B:bc:C:D:d:EeFf:h:ijk:lN:nOo:Pp:r:S:sTt:v:W:-:")) != -1)
|
2004-11-14 19:35:35 +00:00
|
|
|
{
|
1997-09-07 05:04:48 +00:00
|
|
|
switch (flag)
|
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
case 'B':
|
2002-02-23 01:31:37 +00:00
|
|
|
SetConfigOption("shared_buffers", optarg, ctx, gucsource);
|
1997-09-08 02:41:22 +00:00
|
|
|
break;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2011-04-25 12:00:21 -04:00
|
|
|
case 'b':
|
|
|
|
/* Undocumented flag used for binary upgrades */
|
2013-04-01 14:00:51 -04:00
|
|
|
if (secure)
|
|
|
|
IsBinaryUpgrade = true;
|
2011-04-25 12:00:21 -04:00
|
|
|
break;
|
|
|
|
|
2011-10-06 09:38:39 -04:00
|
|
|
case 'C':
|
|
|
|
/* ignored for consistency with the postmaster */
|
|
|
|
break;
|
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'D':
|
1999-05-22 17:47:54 +00:00
|
|
|
if (secure)
|
2009-08-29 19:26:52 +00:00
|
|
|
userDoption = strdup(optarg);
|
1998-10-13 19:51:50 +00:00
|
|
|
break;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'd':
|
2009-08-29 19:26:52 +00:00
|
|
|
set_debug_options(atoi(optarg), ctx, gucsource);
|
1997-09-07 05:04:48 +00:00
|
|
|
break;
|
1997-09-08 02:41:22 +00:00
|
|
|
|
|
|
|
case 'E':
|
2013-04-01 14:00:51 -04:00
|
|
|
if (secure)
|
|
|
|
EchoQuery = true;
|
1997-09-07 05:04:48 +00:00
|
|
|
break;
|
1997-09-08 02:41:22 +00:00
|
|
|
|
|
|
|
case 'e':
|
2002-05-17 01:19:19 +00:00
|
|
|
SetConfigOption("datestyle", "euro", ctx, gucsource);
|
1997-09-07 05:04:48 +00:00
|
|
|
break;
|
1997-09-08 02:41:22 +00:00
|
|
|
|
|
|
|
case 'F':
|
2002-02-23 01:31:37 +00:00
|
|
|
SetConfigOption("fsync", "false", ctx, gucsource);
|
1997-09-07 05:04:48 +00:00
|
|
|
break;
|
1997-09-08 02:41:22 +00:00
|
|
|
|
|
|
|
case 'f':
|
2006-01-05 10:07:46 +00:00
|
|
|
if (!set_plan_disabling_options(optarg, ctx, gucsource))
|
|
|
|
errs++;
|
|
|
|
break;
|
2001-03-22 06:16:21 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'h':
|
|
|
|
SetConfigOption("listen_addresses", optarg, ctx, gucsource);
|
1997-09-07 05:04:48 +00:00
|
|
|
break;
|
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'i':
|
|
|
|
SetConfigOption("listen_addresses", "*", ctx, gucsource);
|
|
|
|
break;
|
2001-03-22 06:16:21 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'j':
|
2013-04-01 14:00:51 -04:00
|
|
|
if (secure)
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
UseSemiNewlineNewline = true;
|
1997-09-08 02:41:22 +00:00
|
|
|
break;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'k':
|
2012-08-10 17:26:44 -04:00
|
|
|
SetConfigOption("unix_socket_directories", optarg, ctx, gucsource);
|
2006-01-05 10:07:46 +00:00
|
|
|
break;
|
2001-03-22 06:16:21 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'l':
|
|
|
|
SetConfigOption("ssl", "true", ctx, gucsource);
|
1999-03-17 22:53:31 +00:00
|
|
|
break;
|
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'N':
|
|
|
|
SetConfigOption("max_connections", optarg, ctx, gucsource);
|
|
|
|
break;
|
2001-03-22 06:16:21 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'n':
|
|
|
|
/* ignored for consistency with postmaster */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'O':
|
|
|
|
SetConfigOption("allow_system_table_mods", "true", ctx, gucsource);
|
2000-02-18 09:30:20 +00:00
|
|
|
break;
|
|
|
|
|
1999-05-01 17:16:25 +00:00
|
|
|
case 'o':
|
2006-01-05 10:07:46 +00:00
|
|
|
errs++;
|
|
|
|
break;
|
2001-03-22 06:16:21 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'P':
|
|
|
|
SetConfigOption("ignore_system_indexes", "true", ctx, gucsource);
|
1999-05-01 17:16:25 +00:00
|
|
|
break;
|
|
|
|
|
1999-05-22 17:47:54 +00:00
|
|
|
case 'p':
|
2006-01-05 10:07:46 +00:00
|
|
|
SetConfigOption("port", optarg, ctx, gucsource);
|
|
|
|
break;
|
2004-08-29 05:07:03 +00:00
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'r':
|
|
|
|
/* send output (stdout and stderr) to the given file */
|
1999-05-22 17:47:54 +00:00
|
|
|
if (secure)
|
2007-02-10 14:58:55 +00:00
|
|
|
strlcpy(OutputFileName, optarg, MAXPGPATH);
|
1997-09-08 02:41:22 +00:00
|
|
|
break;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
1997-09-08 02:41:22 +00:00
|
|
|
case 'S':
|
2004-02-03 17:34:04 +00:00
|
|
|
SetConfigOption("work_mem", optarg, ctx, gucsource);
|
1997-09-07 05:04:48 +00:00
|
|
|
break;
|
1997-09-08 02:41:22 +00:00
|
|
|
|
|
|
|
case 's':
|
2009-08-29 19:26:52 +00:00
|
|
|
SetConfigOption("log_statement_stats", "true", ctx, gucsource);
|
1998-08-25 21:34:10 +00:00
|
|
|
break;
|
|
|
|
|
2006-01-05 10:07:46 +00:00
|
|
|
case 'T':
|
2011-10-06 09:38:39 -04:00
|
|
|
/* ignored for consistency with the postmaster */
|
2006-01-05 10:07:46 +00:00
|
|
|
break;
|
|
|
|
|
1997-09-08 02:41:22 +00:00
|
|
|
case 't':
|
2004-11-24 19:51:05 +00:00
|
|
|
{
|
2006-10-04 00:30:14 +00:00
|
|
|
const char *tmp = get_stats_option_name(optarg);
|
|
|
|
|
|
|
|
if (tmp)
|
2009-08-29 19:26:52 +00:00
|
|
|
SetConfigOption(tmp, "true", ctx, gucsource);
|
2004-11-24 19:51:05 +00:00
|
|
|
else
|
2006-10-04 00:30:14 +00:00
|
|
|
errs++;
|
|
|
|
break;
|
2004-11-24 19:51:05 +00:00
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
|
1998-01-26 01:42:53 +00:00
|
|
|
case 'v':
|
2010-02-26 02:01:40 +00:00
|
|
|
|
2009-08-28 18:23:53 +00:00
|
|
|
/*
|
|
|
|
* -v is no longer used in normal operation, since
|
2010-02-26 02:01:40 +00:00
|
|
|
* FrontendProtocol is already set before we get here. We keep
|
|
|
|
* the switch only for possible use in standalone operation,
|
|
|
|
* in case we ever support using normal FE/BE protocol with a
|
|
|
|
* standalone backend.
|
2009-08-28 18:23:53 +00:00
|
|
|
*/
|
1999-05-22 17:47:54 +00:00
|
|
|
if (secure)
|
|
|
|
FrontendProtocol = (ProtocolVersion) atoi(optarg);
|
1998-01-26 01:42:53 +00:00
|
|
|
break;
|
|
|
|
|
1998-08-25 21:04:41 +00:00
|
|
|
case 'W':
|
2006-01-05 10:07:46 +00:00
|
|
|
SetConfigOption("post_auth_delay", optarg, ctx, gucsource);
|
|
|
|
break;
|
2001-03-22 06:16:21 +00:00
|
|
|
|
2000-11-08 17:57:46 +00:00
|
|
|
case 'c':
|
2000-05-31 00:28:42 +00:00
|
|
|
case '-':
|
2000-11-08 17:57:46 +00:00
|
|
|
{
|
2001-03-22 04:01:46 +00:00
|
|
|
char *name,
|
|
|
|
*value;
|
2000-07-03 20:46:10 +00:00
|
|
|
|
2001-03-22 04:01:46 +00:00
|
|
|
ParseLongOption(optarg, &name, &value);
|
|
|
|
if (!value)
|
|
|
|
{
|
|
|
|
if (flag == '-')
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_SYNTAX_ERROR),
|
|
|
|
errmsg("--%s requires a value",
|
|
|
|
optarg)));
|
2001-03-22 04:01:46 +00:00
|
|
|
else
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_SYNTAX_ERROR),
|
|
|
|
errmsg("-c %s requires a value",
|
|
|
|
optarg)));
|
2001-03-22 04:01:46 +00:00
|
|
|
}
|
2009-08-29 19:26:52 +00:00
|
|
|
SetConfigOption(name, value, ctx, gucsource);
|
2001-03-22 04:01:46 +00:00
|
|
|
free(name);
|
|
|
|
if (value)
|
|
|
|
free(value);
|
|
|
|
break;
|
|
|
|
}
|
2000-05-31 00:28:42 +00:00
|
|
|
|
1997-09-08 02:41:22 +00:00
|
|
|
default:
|
|
|
|
errs++;
|
1999-05-01 17:16:25 +00:00
|
|
|
break;
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
2012-03-11 01:52:05 +02:00
|
|
|
|
|
|
|
if (errs)
|
|
|
|
break;
|
2003-04-17 22:26:02 +00:00
|
|
|
}
|
|
|
|
|
2004-11-24 19:51:05 +00:00
|
|
|
/*
|
2013-04-01 14:00:51 -04:00
|
|
|
* Optional database name should be there only if *dbname is NULL.
|
2004-11-24 19:51:05 +00:00
|
|
|
*/
|
2013-04-01 14:00:51 -04:00
|
|
|
if (!errs && dbname && *dbname == NULL && argc - optind >= 1)
|
|
|
|
*dbname = strdup(argv[optind++]);
|
2012-03-11 01:52:05 +02:00
|
|
|
|
|
|
|
if (errs || argc != optind)
|
2004-11-24 19:51:05 +00:00
|
|
|
{
|
2012-03-11 01:52:05 +02:00
|
|
|
if (errs)
|
|
|
|
optind--; /* complain about the previous argument */
|
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
/* spell the error message a bit differently depending on context */
|
|
|
|
if (IsUnderPostmaster)
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_SYNTAX_ERROR),
|
2012-06-10 15:20:04 -04:00
|
|
|
errmsg("invalid command-line argument for server process: %s", argv[optind]),
|
2010-02-26 02:01:40 +00:00
|
|
|
errhint("Try \"%s --help\" for more information.", progname)));
|
2009-08-29 19:26:52 +00:00
|
|
|
else
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_SYNTAX_ERROR),
|
2012-03-11 01:52:05 +02:00
|
|
|
errmsg("%s: invalid command-line argument: %s",
|
|
|
|
progname, argv[optind]),
|
2010-02-26 02:01:40 +00:00
|
|
|
errhint("Try \"%s --help\" for more information.", progname)));
|
2009-08-29 19:26:52 +00:00
|
|
|
}
|
2004-11-24 19:51:05 +00:00
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
/*
|
|
|
|
* Reset getopt(3) library so that it will work correctly in subprocesses
|
|
|
|
* or when this function is called a second time with another array.
|
|
|
|
*/
|
|
|
|
optind = 1;
|
2010-12-16 16:22:05 -05:00
|
|
|
#ifdef HAVE_INT_OPTRESET
|
2009-08-29 19:26:52 +00:00
|
|
|
optreset = 1; /* some systems need this too */
|
|
|
|
#endif
|
|
|
|
}
|
2004-11-24 19:51:05 +00:00
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
|
|
|
|
/* ----------------------------------------------------------------
|
|
|
|
* PostgresMain
|
|
|
|
* postgres main loop -- all backends, interactive or otherwise start here
|
|
|
|
*
|
|
|
|
* argc/argv are the command line arguments to be used. (When being forked
|
|
|
|
* by the postmaster, these are not the original argv array of the process.)
|
2013-04-01 14:00:51 -04:00
|
|
|
* dbname is the name of the database to connect to, or NULL if the database
|
|
|
|
* name should be extracted from the command line arguments or defaulted.
|
|
|
|
* username is the PostgreSQL user name to be used for the session.
|
2009-08-29 19:26:52 +00:00
|
|
|
* ----------------------------------------------------------------
|
|
|
|
*/
|
2012-06-25 21:25:26 +03:00
|
|
|
void
|
2013-04-01 14:00:51 -04:00
|
|
|
PostgresMain(int argc, char *argv[],
|
|
|
|
const char *dbname,
|
|
|
|
const char *username)
|
2009-08-29 19:26:52 +00:00
|
|
|
{
|
|
|
|
int firstchar;
|
|
|
|
StringInfoData input_message;
|
|
|
|
sigjmp_buf local_sigjmp_buf;
|
|
|
|
volatile bool send_ready_for_query = true;
|
2016-03-16 11:30:45 -04:00
|
|
|
bool disable_idle_in_transaction_timeout = false;
|
2009-08-29 19:26:52 +00:00
|
|
|
|
2015-01-13 13:12:37 +01:00
|
|
|
/* Initialize startup process environment if necessary. */
|
2009-08-29 19:26:52 +00:00
|
|
|
if (!IsUnderPostmaster)
|
2015-01-13 13:12:37 +01:00
|
|
|
InitStandaloneProcess(argv[0]);
|
2009-08-29 19:26:52 +00:00
|
|
|
|
|
|
|
SetProcessingMode(InitProcessing);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set default values for command-line options.
|
|
|
|
*/
|
|
|
|
if (!IsUnderPostmaster)
|
|
|
|
InitializeGUCOptions();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Parse command-line options.
|
|
|
|
*/
|
2013-04-01 14:00:51 -04:00
|
|
|
process_postgres_switches(argc, argv, PGC_POSTMASTER, &dbname);
|
2009-08-29 19:26:52 +00:00
|
|
|
|
|
|
|
/* Must have gotten a database name, or have a default (the username) */
|
|
|
|
if (dbname == NULL)
|
|
|
|
{
|
|
|
|
dbname = username;
|
|
|
|
if (dbname == NULL)
|
|
|
|
ereport(FATAL,
|
2010-02-26 02:01:40 +00:00
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("%s: no database nor user name specified",
|
|
|
|
progname)));
|
2004-11-24 19:51:05 +00:00
|
|
|
}
|
|
|
|
|
2004-05-28 05:13:32 +00:00
|
|
|
/* Acquire configuration parameters, unless inherited from postmaster */
|
|
|
|
if (!IsUnderPostmaster)
|
2003-09-02 19:04:12 +00:00
|
|
|
{
|
2009-09-01 00:09:42 +00:00
|
|
|
if (!SelectConfigFiles(userDoption, progname))
|
2004-10-08 01:36:36 +00:00
|
|
|
proc_exit(1);
|
2004-05-28 05:13:32 +00:00
|
|
|
}
|
2004-05-21 05:08:06 +00:00
|
|
|
|
1999-10-06 21:58:18 +00:00
|
|
|
/*
|
2000-12-20 21:51:52 +00:00
|
|
|
* Set up signal handlers and masks.
|
1999-10-06 21:58:18 +00:00
|
|
|
*
|
2005-11-22 18:17:34 +00:00
|
|
|
* Note that postmaster blocked all signals before forking child process,
|
|
|
|
* so there is no race condition whereby we might receive a signal before
|
|
|
|
* we have set up the handler.
|
XLOG (and related) changes:
* Store two past checkpoint locations, not just one, in pg_control.
On startup, we fall back to the older checkpoint if the newer one
is unreadable. Also, a physical copy of the newest checkpoint record
is kept in pg_control for possible use in disaster recovery (ie,
complete loss of pg_xlog). Also add a version number for pg_control
itself. Remove archdir from pg_control; it ought to be a GUC
parameter, not a special case (not that it's implemented yet anyway).
* Suppress successive checkpoint records when nothing has been entered
in the WAL log since the last one. This is not so much to avoid I/O
as to make it actually useful to keep track of the last two
checkpoints. If the things are right next to each other then there's
not a lot of redundancy gained...
* Change CRC scheme to a true 64-bit CRC, not a pair of 32-bit CRCs
on alternate bytes. Polynomial borrowed from ECMA DLT1 standard.
* Fix XLOG record length handling so that it will work at BLCKSZ = 32k.
* Change XID allocation to work more like OID allocation. (This is of
dubious necessity, but I think it's a good idea anyway.)
* Fix a number of minor bugs, such as off-by-one logic for XLOG file
wraparound at the 4 gig mark.
* Add documentation and clean up some coding infelicities; move file
format declarations out to include files where planned contrib
utilities can get at them.
* Checkpoint will now occur every CHECKPOINT_SEGMENTS log segments or
every CHECKPOINT_TIMEOUT seconds, whichever comes first. It is also
possible to force a checkpoint by sending SIGUSR1 to the postmaster
(undocumented feature...)
* Defend against kill -9 postmaster by storing shmem block's key and ID
in postmaster.pid lockfile, and checking at startup to ensure that no
processes are still connected to old shmem block (if it still exists).
* Switch backends to accept SIGQUIT rather than SIGUSR1 for emergency
stop, for symmetry with postmaster and xlog utilities. Clean up signal
handling in bootstrap.c so that xlog utilities launched by postmaster
will react to signals better.
* Standalone bootstrap now grabs lockfile in target directory, as added
insurance against running it in parallel with live postmaster.
2001-03-13 01:17:06 +00:00
|
|
|
*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Also note: it's best not to use any signals that are SIG_IGNored in the
|
2014-05-06 12:12:18 -04:00
|
|
|
* postmaster. If such a signal arrives before we are able to change the
|
2005-10-15 02:49:52 +00:00
|
|
|
* handler to non-SIG_IGN, it'll get dropped. Instead, make a dummy
|
|
|
|
* handler in the postmaster to reserve the signal. (Of course, this isn't
|
|
|
|
* an issue for signals that are locally generated, such as SIGALRM and
|
|
|
|
* SIGPIPE.)
|
1999-10-06 21:58:18 +00:00
|
|
|
*/
|
2010-01-15 09:19:10 +00:00
|
|
|
if (am_walsender)
|
|
|
|
WalSndSignals();
|
2007-07-09 01:15:14 +00:00
|
|
|
else
|
2010-01-15 09:19:10 +00:00
|
|
|
{
|
2017-06-13 13:05:59 -04:00
|
|
|
pqsignal(SIGHUP, PostgresSigHupHandler); /* set flag to read
|
|
|
|
* config file */
|
2010-02-26 02:01:40 +00:00
|
|
|
pqsignal(SIGINT, StatementCancelHandler); /* cancel current query */
|
|
|
|
pqsignal(SIGTERM, die); /* cancel current query and exit */
|
2000-04-12 17:17:23 +00:00
|
|
|
|
2010-01-15 09:19:10 +00:00
|
|
|
/*
|
|
|
|
* In a standalone backend, SIGQUIT can be generated from the keyboard
|
2010-02-26 02:01:40 +00:00
|
|
|
* easily, while SIGTERM cannot, so we make both signals do die()
|
|
|
|
* rather than quickdie().
|
2010-01-15 09:19:10 +00:00
|
|
|
*/
|
|
|
|
if (IsUnderPostmaster)
|
2010-02-26 02:01:40 +00:00
|
|
|
pqsignal(SIGQUIT, quickdie); /* hard crash time */
|
2010-01-15 09:19:10 +00:00
|
|
|
else
|
2010-02-26 02:01:40 +00:00
|
|
|
pqsignal(SIGQUIT, die); /* cancel current query and exit */
|
2013-05-29 16:58:43 -04:00
|
|
|
InitializeTimeouts(); /* establishes SIGALRM handler */
|
2001-01-14 05:08:17 +00:00
|
|
|
|
2010-01-15 09:19:10 +00:00
|
|
|
/*
|
|
|
|
* Ignore failure to write to frontend. Note: if frontend closes
|
|
|
|
* connection, we will notice it and exit cleanly when control next
|
2010-02-26 02:01:40 +00:00
|
|
|
* returns to outer loop. This seems safer than forcing exit in the
|
|
|
|
* midst of output during who-knows-what operation...
|
2010-01-15 09:19:10 +00:00
|
|
|
*/
|
|
|
|
pqsignal(SIGPIPE, SIG_IGN);
|
|
|
|
pqsignal(SIGUSR1, procsignal_sigusr1_handler);
|
|
|
|
pqsignal(SIGUSR2, SIG_IGN);
|
|
|
|
pqsignal(SIGFPE, FloatExceptionHandler);
|
|
|
|
|
|
|
|
/*
|
2010-02-26 02:01:40 +00:00
|
|
|
* Reset some signals that are accepted by postmaster but not by
|
|
|
|
* backend
|
2010-01-15 09:19:10 +00:00
|
|
|
*/
|
2010-02-26 02:01:40 +00:00
|
|
|
pqsignal(SIGCHLD, SIG_DFL); /* system() requires this on some
|
|
|
|
* platforms */
|
2010-01-15 09:19:10 +00:00
|
|
|
}
|
1999-10-06 21:58:18 +00:00
|
|
|
|
2000-12-20 21:51:52 +00:00
|
|
|
pqinitmask();
|
|
|
|
|
2007-07-09 01:15:14 +00:00
|
|
|
if (IsUnderPostmaster)
|
|
|
|
{
|
|
|
|
/* We allow SIGQUIT (quickdie) at all times */
|
|
|
|
sigdelset(&BlockSig, SIGQUIT);
|
|
|
|
}
|
2000-12-20 21:51:52 +00:00
|
|
|
|
XLOG (and related) changes:
* Store two past checkpoint locations, not just one, in pg_control.
On startup, we fall back to the older checkpoint if the newer one
is unreadable. Also, a physical copy of the newest checkpoint record
is kept in pg_control for possible use in disaster recovery (ie,
complete loss of pg_xlog). Also add a version number for pg_control
itself. Remove archdir from pg_control; it ought to be a GUC
parameter, not a special case (not that it's implemented yet anyway).
* Suppress successive checkpoint records when nothing has been entered
in the WAL log since the last one. This is not so much to avoid I/O
as to make it actually useful to keep track of the last two
checkpoints. If the things are right next to each other then there's
not a lot of redundancy gained...
* Change CRC scheme to a true 64-bit CRC, not a pair of 32-bit CRCs
on alternate bytes. Polynomial borrowed from ECMA DLT1 standard.
* Fix XLOG record length handling so that it will work at BLCKSZ = 32k.
* Change XID allocation to work more like OID allocation. (This is of
dubious necessity, but I think it's a good idea anyway.)
* Fix a number of minor bugs, such as off-by-one logic for XLOG file
wraparound at the 4 gig mark.
* Add documentation and clean up some coding infelicities; move file
format declarations out to include files where planned contrib
utilities can get at them.
* Checkpoint will now occur every CHECKPOINT_SEGMENTS log segments or
every CHECKPOINT_TIMEOUT seconds, whichever comes first. It is also
possible to force a checkpoint by sending SIGUSR1 to the postmaster
(undocumented feature...)
* Defend against kill -9 postmaster by storing shmem block's key and ID
in postmaster.pid lockfile, and checking at startup to ensure that no
processes are still connected to old shmem block (if it still exists).
* Switch backends to accept SIGQUIT rather than SIGUSR1 for emergency
stop, for symmetry with postmaster and xlog utilities. Clean up signal
handling in bootstrap.c so that xlog utilities launched by postmaster
will react to signals better.
* Standalone bootstrap now grabs lockfile in target directory, as added
insurance against running it in parallel with live postmaster.
2001-03-13 01:17:06 +00:00
|
|
|
PG_SETMASK(&BlockSig); /* block everything except SIGQUIT */
|
2000-12-20 21:51:52 +00:00
|
|
|
|
2010-04-20 01:38:52 +00:00
|
|
|
if (!IsUnderPostmaster)
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2001-10-19 17:03:08 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Validate we have been given a reasonable-looking DataDir (if under
|
|
|
|
* postmaster, assume postmaster did this already).
|
2001-10-19 17:03:08 +00:00
|
|
|
*/
|
2004-10-08 01:36:36 +00:00
|
|
|
Assert(DataDir);
|
2001-10-19 17:03:08 +00:00
|
|
|
ValidatePgVersion(DataDir);
|
|
|
|
|
2005-07-04 04:51:52 +00:00
|
|
|
/* Change into DataDir (if under postmaster, was done already) */
|
|
|
|
ChangeToDataDir();
|
|
|
|
|
2000-01-09 12:17:33 +00:00
|
|
|
/*
|
2000-11-29 20:59:54 +00:00
|
|
|
* Create lockfile for data directory.
|
2000-01-09 12:17:33 +00:00
|
|
|
*/
|
2005-07-04 04:51:52 +00:00
|
|
|
CreateDataDirLockFile(false);
|
2013-01-02 17:49:06 -03:00
|
|
|
|
2013-01-02 18:39:20 -03:00
|
|
|
/* Initialize MaxBackends (if under postmaster, was done already) */
|
2013-01-02 17:49:06 -03:00
|
|
|
InitializeMaxBackends();
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
|
2010-04-20 01:38:52 +00:00
|
|
|
/* Early initialization */
|
|
|
|
BaseInit();
|
|
|
|
|
2006-01-04 21:06:32 +00:00
|
|
|
/*
|
2006-10-04 00:30:14 +00:00
|
|
|
* Create a per-backend PGPROC struct in shared memory, except in the
|
|
|
|
* EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
|
|
|
|
* this before we can use LWLocks (and in the EXEC_BACKEND case we already
|
|
|
|
* had to do some stuff with LWLocks).
|
2006-01-04 21:06:32 +00:00
|
|
|
*/
|
|
|
|
#ifdef EXEC_BACKEND
|
|
|
|
if (!IsUnderPostmaster)
|
|
|
|
InitProcess();
|
|
|
|
#else
|
|
|
|
InitProcess();
|
|
|
|
#endif
|
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
/* We need to allow SIGINT, etc during the initial transaction */
|
|
|
|
PG_SETMASK(&UnBlockSig);
|
|
|
|
|
1999-10-06 21:58:18 +00:00
|
|
|
/*
|
2000-12-18 00:44:50 +00:00
|
|
|
* General initialization.
|
|
|
|
*
|
2005-11-22 18:17:34 +00:00
|
|
|
* NOTE: if you are tempted to add code in this vicinity, consider putting
|
|
|
|
* it inside InitPostgres() instead. In particular, anything that
|
|
|
|
* involves database access should be there, not here.
|
1999-05-22 17:47:54 +00:00
|
|
|
*/
|
2015-02-02 16:23:59 -05:00
|
|
|
InitPostgres(dbname, InvalidOid, username, InvalidOid, NULL);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2009-08-29 19:26:52 +00:00
|
|
|
/*
|
|
|
|
* If the PostmasterContext is still around, recycle the space; we don't
|
|
|
|
* need it anymore after InitPostgres completes. Note this does not trash
|
|
|
|
* *MyProcPort, because ConnCreate() allocated that space with malloc()
|
|
|
|
* ... else we'd need to copy the Port data first. Also, subsidiary data
|
|
|
|
* such as the username isn't lost either; see ProcessStartupPacket().
|
|
|
|
*/
|
|
|
|
if (PostmasterContext)
|
|
|
|
{
|
|
|
|
MemoryContextDelete(PostmasterContext);
|
|
|
|
PostmasterContext = NULL;
|
|
|
|
}
|
|
|
|
|
2000-12-18 00:44:50 +00:00
|
|
|
SetProcessingMode(NormalProcessing);
|
1999-02-02 03:45:56 +00:00
|
|
|
|
2004-11-14 19:35:35 +00:00
|
|
|
/*
|
|
|
|
* Now all GUC states are fully set up. Report them to client if
|
|
|
|
* appropriate.
|
|
|
|
*/
|
|
|
|
BeginReportingGUCOptions();
|
|
|
|
|
2004-11-24 19:51:05 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Also set up handler to log session end; we have to wait till now to be
|
|
|
|
* sure Log_disconnections has its final value.
|
2004-11-24 19:51:05 +00:00
|
|
|
*/
|
|
|
|
if (IsUnderPostmaster && Log_disconnections)
|
|
|
|
on_proc_exit(log_disconnections, 0);
|
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
/* Perform initialization specific to a WAL sender process. */
|
2010-01-15 09:19:10 +00:00
|
|
|
if (am_walsender)
|
2012-10-05 17:13:07 +03:00
|
|
|
InitWalSender();
|
2010-01-15 09:19:10 +00:00
|
|
|
|
2006-08-15 18:26:59 +00:00
|
|
|
/*
|
2006-10-04 00:30:14 +00:00
|
|
|
* process any libraries that should be preloaded at backend start (this
|
|
|
|
* likewise can't be done until GUC settings are complete)
|
2006-08-15 18:26:59 +00:00
|
|
|
*/
|
2013-06-12 22:28:24 -04:00
|
|
|
process_session_preload_libraries();
|
2006-08-15 18:26:59 +00:00
|
|
|
|
2000-04-12 17:17:23 +00:00
|
|
|
/*
|
|
|
|
* Send this backend's cancellation info to the frontend.
|
1998-10-02 01:14:14 +00:00
|
|
|
*/
|
2016-10-11 12:19:18 -04:00
|
|
|
if (whereToSendOutput == DestRemote)
|
1998-07-09 03:29:11 +00:00
|
|
|
{
|
1999-04-25 03:19:27 +00:00
|
|
|
StringInfoData buf;
|
1999-05-25 16:15:34 +00:00
|
|
|
|
2003-04-22 00:08:07 +00:00
|
|
|
pq_beginmessage(&buf, 'K');
|
1999-04-25 03:19:27 +00:00
|
|
|
pq_sendint(&buf, (int32) MyProcPid, sizeof(int32));
|
|
|
|
pq_sendint(&buf, (int32) MyCancelKey, sizeof(int32));
|
|
|
|
pq_endmessage(&buf);
|
1998-07-09 03:29:11 +00:00
|
|
|
/* Need not flush since ReadyForQuery will do it. */
|
|
|
|
}
|
|
|
|
|
2004-01-28 21:02:40 +00:00
|
|
|
/* Welcome banner for standalone case */
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput == DestDebug)
|
2003-11-29 21:40:43 +00:00
|
|
|
printf("\nPostgreSQL stand-alone backend %s\n", PG_VERSION);
|
1998-10-02 01:14:14 +00:00
|
|
|
|
2000-06-28 03:33:33 +00:00
|
|
|
/*
|
|
|
|
* Create the memory context we will use in the main loop.
|
|
|
|
*
|
2003-05-02 20:54:36 +00:00
|
|
|
* MessageContext is reset once per iteration of the main loop, ie, upon
|
|
|
|
* completion of processing of each command message from the client.
|
2000-06-28 03:33:33 +00:00
|
|
|
*/
|
2003-05-02 20:54:36 +00:00
|
|
|
MessageContext = AllocSetContextCreate(TopMemoryContext,
|
|
|
|
"MessageContext",
|
Add macros to make AllocSetContextCreate() calls simpler and safer.
I found that half a dozen (nearly 5%) of our AllocSetContextCreate calls
had typos in the context-sizing parameters. While none of these led to
especially significant problems, they did create minor inefficiencies,
and it's now clear that expecting people to copy-and-paste those calls
accurately is not a great idea. Let's reduce the risk of future errors
by introducing single macros that encapsulate the common use-cases.
Three such macros are enough to cover all but two special-purpose contexts;
those two calls can be left as-is, I think.
While this patch doesn't in itself improve matters for third-party
extensions, it doesn't break anything for them either, and they can
gradually adopt the simplified notation over time.
In passing, change TopMemoryContext to use the default allocation
parameters. Formerly it could only be extended 8K at a time. That was
probably reasonable when this code was written; but nowadays we create
many more contexts than we did then, so that it's not unusual to have a
couple hundred K in TopMemoryContext, even without considering various
dubious code that sticks other things there. There seems no good reason
not to let it use growing blocks like most other contexts.
Back-patch to 9.6, mostly because that's still close enough to HEAD that
it's easy to do so, and keeping the branches in sync can be expected to
avoid some future back-patching pain. The bugs fixed by these changes
don't seem to be significant enough to justify fixing them further back.
Discussion: <21072.1472321324@sss.pgh.pa.us>
2016-08-27 17:50:38 -04:00
|
|
|
ALLOCSET_DEFAULT_SIZES);
|
2000-06-28 03:33:33 +00:00
|
|
|
|
2005-06-14 21:04:42 +00:00
|
|
|
/*
|
2005-06-29 22:51:57 +00:00
|
|
|
* Remember stand-alone backend startup time
|
2005-06-14 21:04:42 +00:00
|
|
|
*/
|
|
|
|
if (!IsUnderPostmaster)
|
2005-06-29 22:51:57 +00:00
|
|
|
PgStartTime = GetCurrentTimestamp();
|
2005-06-14 21:04:42 +00:00
|
|
|
|
1999-10-06 21:58:18 +00:00
|
|
|
/*
|
|
|
|
* POSTGRES main processing loop begins here
|
1997-09-07 05:04:48 +00:00
|
|
|
*
|
2005-10-15 02:49:52 +00:00
|
|
|
* If an exception is encountered, processing resumes here so we abort the
|
|
|
|
* current transaction and start a new one.
|
2004-07-31 00:45:57 +00:00
|
|
|
*
|
2005-11-22 18:17:34 +00:00
|
|
|
* You might wonder why this isn't coded as an infinite loop around a
|
|
|
|
* PG_TRY construct. The reason is that this is the bottom of the
|
|
|
|
* exception stack, and so with PG_TRY there would be no exception handler
|
|
|
|
* in force at all during the CATCH part. By leaving the outermost setjmp
|
|
|
|
* always active, we have at least some chance of recovering from an error
|
|
|
|
* during error recovery. (If we get into an infinite loop thereby, it
|
|
|
|
* will soon be stopped by overflow of elog.c's internal state stack.)
|
Fix assorted race conditions in the new timeout infrastructure.
Prevent handle_sig_alarm from losing control partway through due to a query
cancel (either an asynchronous SIGINT, or a cancel triggered by one of the
timeout handler functions). That would at least result in failure to
schedule any required future interrupt, and might result in actual
corruption of timeout.c's data structures, if the interrupt happened while
we were updating those.
We could still lose control if an asynchronous SIGINT arrives just as the
function is entered. This wouldn't break any data structures, but it would
have the same effect as if the SIGALRM interrupt had been silently lost:
we'd not fire any currently-due handlers, nor schedule any new interrupt.
To forestall that scenario, forcibly reschedule any pending timer interrupt
during AbortTransaction and AbortSubTransaction. We can avoid any extra
kernel call in most cases by not doing that until we've allowed
LockErrorCleanup to kill the DEADLOCK_TIMEOUT and LOCK_TIMEOUT events.
Another hazard is that some platforms (at least Linux and *BSD) block a
signal before calling its handler and then unblock it on return. When we
longjmp out of the handler, the unblock doesn't happen, and the signal is
left blocked indefinitely. Again, we can fix that by forcibly unblocking
signals during AbortTransaction and AbortSubTransaction.
These latter two problems do not manifest when the longjmp reaches
postgres.c, because the error recovery code there kills all pending timeout
events anyway, and it uses sigsetjmp(..., 1) so that the appropriate signal
mask is restored. So errors thrown outside any transaction should be OK
already, and cleaning up in AbortTransaction and AbortSubTransaction should
be enough to fix these issues. (We're assuming that any code that catches
a query cancel error and doesn't re-throw it will do at least a
subtransaction abort to clean up; but that was pretty much required already
by other subsystems.)
Lastly, ProcSleep should not clear the LOCK_TIMEOUT indicator flag when
disabling that event: if a lock timeout interrupt happened after the lock
was granted, the ensuing query cancel is still going to happen at the next
CHECK_FOR_INTERRUPTS, and we want to report it as a lock timeout not a user
cancel.
Per reports from Dan Wood.
Back-patch to 9.3 where the new timeout handling infrastructure was
introduced. We may at some point decide to back-patch the signal
unblocking changes further, but I'll desist from that until we hear
actual field complaints about it.
2013-11-29 16:41:00 -05:00
|
|
|
*
|
|
|
|
* Note that we use sigsetjmp(..., 1), so that this function's signal mask
|
|
|
|
* (to wit, UnBlockSig) will be restored when longjmp'ing to here. This
|
|
|
|
* is essential in case we longjmp'd out of a signal handler on a platform
|
|
|
|
* where that leaves the signal blocked. It's not redundant with the
|
|
|
|
* unblock in AbortTransaction() because the latter is only called if we
|
|
|
|
* were inside a transaction.
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
|
|
|
|
2004-07-31 00:45:57 +00:00
|
|
|
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2000-06-28 03:33:33 +00:00
|
|
|
/*
|
2000-12-18 00:44:50 +00:00
|
|
|
* NOTE: if you are tempted to add more code in this if-block,
|
2004-07-31 00:45:57 +00:00
|
|
|
* consider the high probability that it should be in
|
2014-05-06 12:12:18 -04:00
|
|
|
* AbortTransaction() instead. The only stuff done directly here
|
2005-10-15 02:49:52 +00:00
|
|
|
* should be stuff that is guaranteed to apply *only* for outer-level
|
|
|
|
* error recovery, such as adjusting the FE/BE protocol status.
|
2004-07-31 00:45:57 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Since not using PG_TRY, must reset error stack by hand */
|
|
|
|
error_context_stack = NULL;
|
|
|
|
|
|
|
|
/* Prevent interrupts while cleaning up */
|
|
|
|
HOLD_INTERRUPTS();
|
|
|
|
|
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Forget any pending QueryCancel request, since we're returning to
|
Fix assorted race conditions in the new timeout infrastructure.
Prevent handle_sig_alarm from losing control partway through due to a query
cancel (either an asynchronous SIGINT, or a cancel triggered by one of the
timeout handler functions). That would at least result in failure to
schedule any required future interrupt, and might result in actual
corruption of timeout.c's data structures, if the interrupt happened while
we were updating those.
We could still lose control if an asynchronous SIGINT arrives just as the
function is entered. This wouldn't break any data structures, but it would
have the same effect as if the SIGALRM interrupt had been silently lost:
we'd not fire any currently-due handlers, nor schedule any new interrupt.
To forestall that scenario, forcibly reschedule any pending timer interrupt
during AbortTransaction and AbortSubTransaction. We can avoid any extra
kernel call in most cases by not doing that until we've allowed
LockErrorCleanup to kill the DEADLOCK_TIMEOUT and LOCK_TIMEOUT events.
Another hazard is that some platforms (at least Linux and *BSD) block a
signal before calling its handler and then unblock it on return. When we
longjmp out of the handler, the unblock doesn't happen, and the signal is
left blocked indefinitely. Again, we can fix that by forcibly unblocking
signals during AbortTransaction and AbortSubTransaction.
These latter two problems do not manifest when the longjmp reaches
postgres.c, because the error recovery code there kills all pending timeout
events anyway, and it uses sigsetjmp(..., 1) so that the appropriate signal
mask is restored. So errors thrown outside any transaction should be OK
already, and cleaning up in AbortTransaction and AbortSubTransaction should
be enough to fix these issues. (We're assuming that any code that catches
a query cancel error and doesn't re-throw it will do at least a
subtransaction abort to clean up; but that was pretty much required already
by other subsystems.)
Lastly, ProcSleep should not clear the LOCK_TIMEOUT indicator flag when
disabling that event: if a lock timeout interrupt happened after the lock
was granted, the ensuing query cancel is still going to happen at the next
CHECK_FOR_INTERRUPTS, and we want to report it as a lock timeout not a user
cancel.
Per reports from Dan Wood.
Back-patch to 9.3 where the new timeout handling infrastructure was
introduced. We may at some point decide to back-patch the signal
unblocking changes further, but I'll desist from that until we hear
actual field complaints about it.
2013-11-29 16:41:00 -05:00
|
|
|
* the idle loop anyway, and cancel any active timeout requests. (In
|
|
|
|
* future we might want to allow some timeout requests to survive, but
|
|
|
|
* at minimum it'd be necessary to do reschedule_timeouts(), in case
|
|
|
|
* we got here because of a query cancel interrupting the SIGALRM
|
|
|
|
* interrupt handler.) Note in particular that we must clear the
|
|
|
|
* statement and lock timeout indicators, to prevent any future plain
|
|
|
|
* query cancels from being misreported as timeouts in case we're
|
|
|
|
* forgetting a timeout cancel.
|
2001-01-14 05:08:17 +00:00
|
|
|
*/
|
Introduce timeout handling framework
Management of timeouts was getting a little cumbersome; what we
originally had was more than enough back when we were only concerned
about deadlocks and query cancel; however, when we added timeouts for
standby processes, the code got considerably messier. Since there are
plans to add more complex timeouts, this seems a good time to introduce
a central timeout handling module.
External modules register their timeout handlers during process
initialization, and later enable and disable them as they see fit using
a simple API; timeout.c is in charge of keeping track of which timeouts
are in effect at any time, installing a common SIGALRM signal handler,
and calling setitimer() as appropriate to ensure timely firing of
external handlers.
timeout.c additionally supports pluggable modules to add their own
timeouts, though this capability isn't exercised anywhere yet.
Additionally, as of this commit, walsender processes are aware of
timeouts; we had a preexisting bug there that made those ignore SIGALRM,
thus being subject to unhandled deadlocks, particularly during the
authentication phase. This has already been fixed in back branches in
commit 0bf8eb2a, which see for more details.
Main author: Zoltán Böszörményi
Some review and cleanup by Álvaro Herrera
Extensive reworking by Tom Lane
2012-07-16 18:43:21 -04:00
|
|
|
disable_all_timeouts(false);
|
Fix assorted race conditions in the new timeout infrastructure.
Prevent handle_sig_alarm from losing control partway through due to a query
cancel (either an asynchronous SIGINT, or a cancel triggered by one of the
timeout handler functions). That would at least result in failure to
schedule any required future interrupt, and might result in actual
corruption of timeout.c's data structures, if the interrupt happened while
we were updating those.
We could still lose control if an asynchronous SIGINT arrives just as the
function is entered. This wouldn't break any data structures, but it would
have the same effect as if the SIGALRM interrupt had been silently lost:
we'd not fire any currently-due handlers, nor schedule any new interrupt.
To forestall that scenario, forcibly reschedule any pending timer interrupt
during AbortTransaction and AbortSubTransaction. We can avoid any extra
kernel call in most cases by not doing that until we've allowed
LockErrorCleanup to kill the DEADLOCK_TIMEOUT and LOCK_TIMEOUT events.
Another hazard is that some platforms (at least Linux and *BSD) block a
signal before calling its handler and then unblock it on return. When we
longjmp out of the handler, the unblock doesn't happen, and the signal is
left blocked indefinitely. Again, we can fix that by forcibly unblocking
signals during AbortTransaction and AbortSubTransaction.
These latter two problems do not manifest when the longjmp reaches
postgres.c, because the error recovery code there kills all pending timeout
events anyway, and it uses sigsetjmp(..., 1) so that the appropriate signal
mask is restored. So errors thrown outside any transaction should be OK
already, and cleaning up in AbortTransaction and AbortSubTransaction should
be enough to fix these issues. (We're assuming that any code that catches
a query cancel error and doesn't re-throw it will do at least a
subtransaction abort to clean up; but that was pretty much required already
by other subsystems.)
Lastly, ProcSleep should not clear the LOCK_TIMEOUT indicator flag when
disabling that event: if a lock timeout interrupt happened after the lock
was granted, the ensuing query cancel is still going to happen at the next
CHECK_FOR_INTERRUPTS, and we want to report it as a lock timeout not a user
cancel.
Per reports from Dan Wood.
Back-patch to 9.3 where the new timeout handling infrastructure was
introduced. We may at some point decide to back-patch the signal
unblocking changes further, but I'll desist from that until we hear
actual field complaints about it.
2013-11-29 16:41:00 -05:00
|
|
|
QueryCancelPending = false; /* second to avoid race condition */
|
2004-07-31 00:45:57 +00:00
|
|
|
|
Introduce and use infrastructure for interrupt processing during client reads.
Up to now large swathes of backend code ran inside signal handlers
while reading commands from the client, to allow for speedy reaction to
asynchronous events. Most prominently shared invalidation and NOTIFY
handling. That means that complex code like the starting/stopping of
transactions is run in signal handlers... The required code was
fragile and verbose, and is likely to contain bugs.
That approach also severely limited what could be done while
communicating with the client. As the read might be from within
openssl it wasn't safely possible to trigger an error, e.g. to cancel
a backend in idle-in-transaction state. We did that in some cases,
namely fatal errors, nonetheless.
Now that FE/BE communication in the backend employs non-blocking
sockets and latches to block, we can quite simply interrupt reads from
signal handlers by setting the latch. That allows us to signal an
interrupted read, which is supposed to be retried after returning from
within the ssl library.
As signal handlers now only need to set the latch to guarantee timely
interrupt processing, remove a fair amount of complicated & fragile
code from async.c and sinval.c.
We could now actually start to process some kinds of interrupts, like
sinval ones, more often than before, but that seems better done
separately.
This work will hopefully allow to handle cases like being blocked by
sending data, interrupting idle transactions and similar to be
implemented without too much effort. In addition to allowing getting
rid of ImmediateInterruptOK, that is.
Author: Andres Freund
Reviewed-By: Heikki Linnakangas
2015-02-03 22:25:20 +01:00
|
|
|
/* Not reading from the client anymore. */
|
2005-06-02 21:03:25 +00:00
|
|
|
DoingCommandRead = false;
|
2004-07-31 00:45:57 +00:00
|
|
|
|
2004-09-26 00:26:28 +00:00
|
|
|
/* Make sure libpq is in a good state */
|
|
|
|
pq_comm_reset();
|
|
|
|
|
2004-07-31 00:45:57 +00:00
|
|
|
/* Report the error to the client and/or server log */
|
|
|
|
EmitErrorReport();
|
2001-01-14 05:08:17 +00:00
|
|
|
|
2004-07-17 03:32:14 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Make sure debug_query_string gets reset before we possibly clobber
|
|
|
|
* the storage it points at.
|
2004-07-17 03:32:14 +00:00
|
|
|
*/
|
2004-07-31 00:45:57 +00:00
|
|
|
debug_query_string = NULL;
|
2004-07-17 03:32:14 +00:00
|
|
|
|
2001-01-14 05:08:17 +00:00
|
|
|
/*
|
2004-07-31 00:45:57 +00:00
|
|
|
* Abort the current transaction in order to recover.
|
2000-06-28 03:33:33 +00:00
|
|
|
*/
|
1997-09-07 05:04:48 +00:00
|
|
|
AbortCurrentTransaction();
|
2000-06-28 03:33:33 +00:00
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
if (am_walsender)
|
|
|
|
WalSndErrorCleanup();
|
|
|
|
|
Introduce logical decoding.
This feature, building on previous commits, allows the write-ahead log
stream to be decoded into a series of logical changes; that is,
inserts, updates, and deletes and the transactions which contain them.
It is capable of handling decoding even across changes to the schema
of the affected tables. The output format is controlled by a
so-called "output plugin"; an example is included. To make use of
this in a real replication system, the output plugin will need to be
modified to produce output in the format appropriate to that system,
and to perform filtering.
Currently, information can be extracted from the logical decoding
system only via SQL; future commits will add the ability to stream
changes via walsender.
Andres Freund, with review and other contributions from many other
people, including Álvaro Herrera, Abhijit Menon-Sen, Peter Geoghegan,
Kevin Grittner, Robert Haas, Heikki Linnakangas, Fujii Masao, Abhijit
Menon-Sen, Michael Paquier, Simon Riggs, Craig Ringer, and Steve
Singer.
2014-03-03 16:32:18 -05:00
|
|
|
/*
|
|
|
|
* We can't release replication slots inside AbortTransaction() as we
|
|
|
|
* need to be able to start and abort transactions while having a slot
|
|
|
|
* acquired. But we never need to hold them across top level errors,
|
|
|
|
* so releasing here is fine. There's another cleanup in ProcKill()
|
|
|
|
* ensuring we'll correctly cleanup on FATAL errors as well.
|
|
|
|
*/
|
|
|
|
if (MyReplicationSlot != NULL)
|
|
|
|
ReplicationSlotRelease();
|
|
|
|
|
2016-12-08 12:00:00 -05:00
|
|
|
/* We also want to cleanup temporary slots on error. */
|
|
|
|
ReplicationSlotCleanup();
|
|
|
|
|
2000-06-28 03:33:33 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Now return to normal top-level context and clear ErrorContext for
|
|
|
|
* next time.
|
2000-06-28 03:33:33 +00:00
|
|
|
*/
|
|
|
|
MemoryContextSwitchTo(TopMemoryContext);
|
2004-07-31 00:45:57 +00:00
|
|
|
FlushErrorState();
|
2000-12-18 00:44:50 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* If we were handling an extended-query-protocol message, initiate
|
|
|
|
* skip till next Sync. This also causes us not to issue
|
2003-08-04 00:43:34 +00:00
|
|
|
* ReadyForQuery (until we get Sync).
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
if (doing_extended_query_message)
|
|
|
|
ignore_till_sync = true;
|
2001-01-14 05:08:17 +00:00
|
|
|
|
2004-07-31 00:45:57 +00:00
|
|
|
/* We don't have a transaction command open anymore */
|
|
|
|
xact_started = false;
|
2006-06-20 22:52:00 +00:00
|
|
|
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used to hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
/*
|
|
|
|
* If an error occurred while we were reading a message from the
|
|
|
|
* client, we have potentially lost track of where the previous
|
|
|
|
* message ends and the next one begins. Even though we have
|
|
|
|
* otherwise recovered from the error, we cannot safely read any more
|
|
|
|
* messages from the client, so there isn't much we can do with the
|
|
|
|
* connection anymore.
|
|
|
|
*/
|
|
|
|
if (pq_is_reading_msg())
|
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
Adjust behavior of single-user -j mode for better initdb error reporting.
Previously, -j caused the entire input file to be read in and executed as
a single command string. That's undesirable, not least because any error
causes the entire file to be regurgitated as the "failing query". Some
experimentation suggests a better rule: end the command string when we see
a semicolon immediately followed by two newlines, ie, an empty line after
a query. This serves nicely to break up the existing examples such as
information_schema.sql and system_views.sql. A limitation is that it's
no longer possible to write such a sequence within a string literal or
multiline comment in a file meant to be read with -j; but there are no
instances of such a problem within the data currently used by initdb.
(If someone does make such a mistake in future, it'll be obvious because
they'll get an unterminated-literal or unterminated-comment syntax error.)
Other than that, there shouldn't be any negative consequences; you're not
forced to end statements that way, it's just a better idea in most cases.
In passing, remove src/include/tcop/tcopdebug.h, which is dead code
because it's not included anywhere, and hasn't been for more than
ten years. One of the debug-support symbols it purported to describe
has been unreferenced for at least the same amount of time, and the
other is removed by this commit on the grounds that it was useless:
forcing -j mode all the time would have broken initdb. The lack of
complaints about that, or about the missing inclusion, shows that
no one has tried to use TCOP_DONTUSENEWLINE in many years.
2015-12-17 19:34:15 -05:00
|
|
|
errmsg("terminating connection because protocol synchronization was lost")));
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used to hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
|
2004-07-31 00:45:57 +00:00
|
|
|
/* Now we can allow interrupts again */
|
2001-01-19 22:08:47 +00:00
|
|
|
RESUME_INTERRUPTS();
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
1998-05-19 18:05:58 +00:00
|
|
|
|
2004-07-31 00:45:57 +00:00
|
|
|
/* We can now handle ereport(ERROR) */
|
|
|
|
PG_exception_stack = &local_sigjmp_buf;
|
1999-11-16 06:13:36 +00:00
|
|
|
|
2003-06-20 21:58:02 +00:00
|
|
|
if (!ignore_till_sync)
|
2006-10-04 00:30:14 +00:00
|
|
|
send_ready_for_query = true; /* initially, or after error */
|
2003-04-19 00:02:30 +00:00
|
|
|
|
1998-10-02 01:14:14 +00:00
|
|
|
/*
|
|
|
|
* Non-error queries loop here.
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
for (;;)
|
|
|
|
{
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2003-08-04 00:43:34 +00:00
|
|
|
* At top of loop, reset extended-query-message flag, so that any
|
|
|
|
* errors encountered in "idle" state don't provoke skip.
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
|
|
|
doing_extended_query_message = false;
|
|
|
|
|
2000-06-28 03:33:33 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Release storage left over from prior query cycle, and create a new
|
|
|
|
* query input buffer in the cleared MessageContext.
|
2000-06-28 03:33:33 +00:00
|
|
|
*/
|
2003-05-02 20:54:36 +00:00
|
|
|
MemoryContextSwitchTo(MessageContext);
|
|
|
|
MemoryContextResetAndDeleteChildren(MessageContext);
|
2000-06-28 03:33:33 +00:00
|
|
|
|
2003-08-12 18:52:38 +00:00
|
|
|
initStringInfo(&input_message);
|
1998-10-02 01:14:14 +00:00
|
|
|
|
Account for catalog snapshot in PGXACT->xmin updates.
The CatalogSnapshot was not plugged into SnapshotResetXmin()'s accounting
for whether MyPgXact->xmin could be cleared or advanced. In normal
transactions this was masked by the fact that the transaction snapshot
would be older, but during backend startup and certain utility commands
it was possible to re-use the CatalogSnapshot after MyPgXact->xmin had
been cleared, meaning that recently-deleted rows could be pruned even
though this snapshot could still see them, causing unexpected catalog
lookup failures. This effect appears to be the explanation for a recent
failure on buildfarm member piculet.
To fix, add the CatalogSnapshot to the RegisteredSnapshots heap whenever
it is valid.
In the previous logic, it was possible for the CatalogSnapshot to remain
valid across waits for client input, but with this change that would mean
it delays advance of global xmin in cases where it did not before. To
avoid possibly causing new table-bloat problems with clients that sit idle
for long intervals, add code to invalidate the CatalogSnapshot before
waiting for client input. (When the backend is busy, it's unlikely that
the CatalogSnapshot would be the oldest snap for very long, so we don't
worry about forcing early invalidation of it otherwise.)
In passing, remove the CatalogSnapshotStale flag in favor of using
"CatalogSnapshot != NULL" to represent validity, as we do for the other
special snapshots in snapmgr.c. And improve some obsolete comments.
No regression test because I don't know a deterministic way to cause this
failure. But the stress test shown in the original discussion provokes
"cache lookup failed for relation 1255" within a few dozen seconds for me.
Back-patch to 9.4 where MVCC catalog scans were introduced. (Note: it's
quite easy to produce similar failures with the same test case in branches
before 9.4. But MVCC catalog scans were supposed to fix that.)
Discussion: <16447.1478818294@sss.pgh.pa.us>
2016-11-15 15:55:35 -05:00
|
|
|
/*
|
|
|
|
* Also consider releasing our catalog snapshot if any, so that it's
|
|
|
|
* not preventing advance of global xmin while we wait for the client.
|
|
|
|
*/
|
|
|
|
InvalidateCatalogSnapshotConditionally();
|
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* (1) If we've reached idle state, tell the frontend we're ready for
|
|
|
|
* a new query.
|
1998-10-02 01:14:14 +00:00
|
|
|
*
|
2001-03-22 06:16:21 +00:00
|
|
|
* Note: this includes fflush()'ing the last of the prior output.
|
2003-08-12 18:52:38 +00:00
|
|
|
*
|
|
|
|
* This is also a good time to send collected statistics to the
|
|
|
|
* collector, and to update the PS stats display. We avoid doing
|
2005-10-15 02:49:52 +00:00
|
|
|
* those every time through the message loop because it'd slow down
|
|
|
|
* processing of batched messages, and because we don't want to report
|
2014-05-06 12:12:18 -04:00
|
|
|
* uncommitted updates (that confuses autovacuum). The notification
|
2010-02-16 22:34:57 +00:00
|
|
|
* processor wants a call too, if we are not in a transaction block.
|
1998-05-06 23:51:16 +00:00
|
|
|
*/
|
2005-12-30 23:49:48 +00:00
|
|
|
if (send_ready_for_query)
|
2003-04-19 00:02:30 +00:00
|
|
|
{
|
2010-01-16 10:05:59 +00:00
|
|
|
if (IsAbortedTransactionBlockState())
|
|
|
|
{
|
|
|
|
set_ps_display("idle in transaction (aborted)", false);
|
2012-01-19 14:19:20 +01:00
|
|
|
pgstat_report_activity(STATE_IDLEINTRANSACTION_ABORTED, NULL);
|
2016-03-16 11:30:45 -04:00
|
|
|
|
|
|
|
/* Start the idle-in-transaction timer */
|
|
|
|
if (IdleInTransactionSessionTimeout > 0)
|
|
|
|
{
|
|
|
|
disable_idle_in_transaction_timeout = true;
|
|
|
|
enable_timeout_after(IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
|
|
|
|
IdleInTransactionSessionTimeout);
|
|
|
|
}
|
2010-01-16 10:05:59 +00:00
|
|
|
}
|
|
|
|
else if (IsTransactionOrTransactionBlock())
|
2003-08-12 18:52:38 +00:00
|
|
|
{
|
2006-06-27 22:16:44 +00:00
|
|
|
set_ps_display("idle in transaction", false);
|
2012-01-19 14:19:20 +01:00
|
|
|
pgstat_report_activity(STATE_IDLEINTRANSACTION, NULL);
|
2016-03-16 11:30:45 -04:00
|
|
|
|
|
|
|
/* Start the idle-in-transaction timer */
|
|
|
|
if (IdleInTransactionSessionTimeout > 0)
|
|
|
|
{
|
|
|
|
disable_idle_in_transaction_timeout = true;
|
|
|
|
enable_timeout_after(IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
|
|
|
|
IdleInTransactionSessionTimeout);
|
|
|
|
}
|
2003-08-12 18:52:38 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-02-16 22:34:57 +00:00
|
|
|
ProcessCompletedNotifies();
|
2008-05-15 00:17:41 +00:00
|
|
|
pgstat_report_stat(false);
|
2004-11-20 00:48:58 +00:00
|
|
|
|
2006-06-27 22:16:44 +00:00
|
|
|
set_ps_display("idle", false);
|
2012-01-19 14:19:20 +01:00
|
|
|
pgstat_report_activity(STATE_IDLE, NULL);
|
2003-08-12 18:52:38 +00:00
|
|
|
}
|
2001-06-22 19:16:24 +00:00
|
|
|
|
2003-08-12 18:52:38 +00:00
|
|
|
ReadyForQuery(whereToSendOutput);
|
2005-12-30 23:49:48 +00:00
|
|
|
send_ready_for_query = false;
|
2001-06-22 19:16:24 +00:00
|
|
|
}
|
2001-02-18 04:28:31 +00:00
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* (2) Allow asynchronous signals to be executed immediately if they
|
|
|
|
* come in while we are waiting for client input. (This must be
|
|
|
|
* conditional since we don't want, say, reads on behalf of COPY FROM
|
|
|
|
* STDIN doing the same thing.)
|
1998-10-06 02:40:09 +00:00
|
|
|
*/
|
2005-06-02 21:03:25 +00:00
|
|
|
DoingCommandRead = true;
|
2001-01-14 05:08:17 +00:00
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
|
|
|
* (3) read a command (loop blocks here)
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
2004-07-28 22:05:47 +00:00
|
|
|
firstchar = ReadCommand(&input_message);
|
1998-05-19 18:05:58 +00:00
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
|
|
|
* (4) disable async signal conditions again.
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
*
|
|
|
|
* Query cancel is supposed to be a no-op when there is no query in
|
|
|
|
* progress, so if a query cancel arrived while we were idle, just
|
|
|
|
* reset QueryCancelPending. ProcessInterrupts() has that effect when
|
|
|
|
* it's called when DoingCommandRead is set, so check for interrupts
|
|
|
|
* before resetting DoingCommandRead.
|
1998-10-06 02:40:09 +00:00
|
|
|
*/
|
Be more careful to not lose sync in the FE/BE protocol.
If any error occurred while we were in the middle of reading a protocol
message from the client, we could lose sync, and incorrectly try to
interpret a part of another message as a new protocol message. That will
usually lead to an "invalid frontend message" error that terminates the
connection. However, this is a security issue because an attacker might
be able to deliberately cause an error, inject a Query message in what's
supposed to be just user data, and have the server execute it.
We were quite careful to not have CHECK_FOR_INTERRUPTS() calls or other
operations that could ereport(ERROR) in the middle of processing a message,
but a query cancel interrupt or statement timeout could nevertheless cause
it to happen. Also, the V2 fastpath and COPY handling were not so careful.
It's very difficult to recover in the V2 COPY protocol, so we will just
terminate the connection on error. In practice, that's what happened
previously anyway, as we lost protocol sync.
To fix, add a new variable in pqcomm.c, PqCommReadingMsg, that is set
whenever we're in the middle of reading a message. When it's set, we cannot
safely ERROR out and continue running, because we might've read only part
of a message. PqCommReadingMsg acts somewhat similarly to critical sections
in that if an error occurs while it's set, the error handler will force the
connection to be terminated, as if the error was FATAL. It's not
implemented by promoting ERROR to FATAL in elog.c, like ERROR is promoted
to PANIC in critical sections, because we want to be able to use
PG_TRY/CATCH to recover and regain protocol sync. pq_getmessage() takes
advantage of that to prevent an OOM error from terminating the connection.
To prevent unnecessary connection terminations, add a holdoff mechanism
similar to HOLD/RESUME_INTERRUPTS() that can be used hold off query cancel
interrupts, but still allow die interrupts. The rules on which interrupts
are processed when are now a bit more complicated, so refactor
ProcessInterrupts() and the calls to it in signal handlers so that the
signal handlers always call it if ImmediateInterruptOK is set, and
ProcessInterrupts() can decide to not do anything if the other conditions
are not met.
Reported by Emil Lenngren. Patch reviewed by Noah Misch and Andres Freund.
Backpatch to all supported versions.
Security: CVE-2015-0244
2015-02-02 17:08:45 +02:00
|
|
|
CHECK_FOR_INTERRUPTS();
|
2005-06-02 21:03:25 +00:00
|
|
|
DoingCommandRead = false;
|
1998-10-06 02:40:09 +00:00
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
2016-03-16 11:30:45 -04:00
|
|
|
* (5) turn off the idle-in-transaction timeout
|
|
|
|
*/
|
|
|
|
if (disable_idle_in_transaction_timeout)
|
|
|
|
{
|
|
|
|
disable_timeout(IDLE_IN_TRANSACTION_SESSION_TIMEOUT, false);
|
|
|
|
disable_idle_in_transaction_timeout = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* (6) check for any other interesting events that happened while we
|
2005-10-15 02:49:52 +00:00
|
|
|
* slept.
|
2000-10-24 21:33:52 +00:00
|
|
|
*/
|
2017-06-05 18:53:41 -07:00
|
|
|
if (ConfigReloadPending)
|
2000-10-24 21:33:52 +00:00
|
|
|
{
|
2017-06-05 18:53:41 -07:00
|
|
|
ConfigReloadPending = false;
|
2000-10-24 21:33:52 +00:00
|
|
|
ProcessConfigFile(PGC_SIGHUP);
|
|
|
|
}
|
|
|
|
|
2001-03-22 06:16:21 +00:00
|
|
|
/*
|
2016-03-16 11:30:45 -04:00
|
|
|
* (7) process the command. But ignore it if we're skipping till
|
2003-08-04 00:43:34 +00:00
|
|
|
* Sync.
|
1998-10-02 01:14:14 +00:00
|
|
|
*/
|
2003-05-14 18:40:37 +00:00
|
|
|
if (ignore_till_sync && firstchar != EOF)
|
2003-05-05 00:44:56 +00:00
|
|
|
continue;
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
switch (firstchar)
|
|
|
|
{
|
2003-04-19 00:02:30 +00:00
|
|
|
case 'Q': /* simple query */
|
2003-04-27 20:09:44 +00:00
|
|
|
{
|
2003-05-05 00:44:56 +00:00
|
|
|
const char *query_string;
|
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Set statement_timestamp() */
|
|
|
|
SetCurrentStatementStartTimestamp();
|
|
|
|
|
2003-08-12 18:52:38 +00:00
|
|
|
query_string = pq_getmsgstring(&input_message);
|
|
|
|
pq_getmsgend(&input_message);
|
2003-04-27 20:09:44 +00:00
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
if (am_walsender)
|
2017-03-23 08:36:36 -04:00
|
|
|
{
|
|
|
|
if (!exec_replication_command(query_string))
|
|
|
|
exec_simple_query(query_string);
|
|
|
|
}
|
2012-10-05 17:13:07 +03:00
|
|
|
else
|
|
|
|
exec_simple_query(query_string);
|
2003-04-19 00:02:30 +00:00
|
|
|
|
2005-12-30 23:49:48 +00:00
|
|
|
send_ready_for_query = true;
|
2003-04-27 20:09:44 +00:00
|
|
|
}
|
2003-04-19 00:02:30 +00:00
|
|
|
break;
|
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
case 'P': /* parse */
|
|
|
|
{
|
|
|
|
const char *stmt_name;
|
|
|
|
const char *query_string;
|
|
|
|
int numParams;
|
|
|
|
Oid *paramTypes = NULL;
|
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
forbidden_in_wal_sender(firstchar);
|
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Set statement_timestamp() */
|
|
|
|
SetCurrentStatementStartTimestamp();
|
|
|
|
|
2003-08-12 18:52:38 +00:00
|
|
|
stmt_name = pq_getmsgstring(&input_message);
|
|
|
|
query_string = pq_getmsgstring(&input_message);
|
|
|
|
numParams = pq_getmsgint(&input_message, 2);
|
2003-05-05 00:44:56 +00:00
|
|
|
if (numParams > 0)
|
|
|
|
{
|
2003-08-04 00:43:34 +00:00
|
|
|
int i;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
paramTypes = (Oid *) palloc(numParams * sizeof(Oid));
|
|
|
|
for (i = 0; i < numParams; i++)
|
2003-08-12 18:52:38 +00:00
|
|
|
paramTypes[i] = pq_getmsgint(&input_message, 4);
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
2003-08-12 18:52:38 +00:00
|
|
|
pq_getmsgend(&input_message);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
exec_parse_message(query_string, stmt_name,
|
|
|
|
paramTypes, numParams);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'B': /* bind */
|
2012-10-05 17:13:07 +03:00
|
|
|
forbidden_in_wal_sender(firstchar);
|
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Set statement_timestamp() */
|
|
|
|
SetCurrentStatementStartTimestamp();
|
2003-08-04 00:43:34 +00:00
|
|
|
|
2003-05-05 00:44:56 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* this message is complex enough that it seems best to put
|
|
|
|
* the field extraction out-of-line
|
2003-05-05 00:44:56 +00:00
|
|
|
*/
|
2003-08-12 18:52:38 +00:00
|
|
|
exec_bind_message(&input_message);
|
2003-05-05 00:44:56 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 'E': /* execute */
|
|
|
|
{
|
|
|
|
const char *portal_name;
|
2006-09-03 03:19:45 +00:00
|
|
|
int max_rows;
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
forbidden_in_wal_sender(firstchar);
|
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Set statement_timestamp() */
|
|
|
|
SetCurrentStatementStartTimestamp();
|
|
|
|
|
2003-08-12 18:52:38 +00:00
|
|
|
portal_name = pq_getmsgstring(&input_message);
|
2006-09-03 03:19:45 +00:00
|
|
|
max_rows = pq_getmsgint(&input_message, 4);
|
2003-08-12 18:52:38 +00:00
|
|
|
pq_getmsgend(&input_message);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
2003-05-08 18:16:37 +00:00
|
|
|
exec_execute_message(portal_name, max_rows);
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2003-04-19 00:02:30 +00:00
|
|
|
case 'F': /* fastpath function call */
|
2012-10-05 17:13:07 +03:00
|
|
|
forbidden_in_wal_sender(firstchar);
|
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Set statement_timestamp() */
|
|
|
|
SetCurrentStatementStartTimestamp();
|
|
|
|
|
2010-12-17 18:51:22 -03:00
|
|
|
/* Report query to various monitoring facilities. */
|
2012-01-19 14:19:20 +01:00
|
|
|
pgstat_report_activity(STATE_FASTPATH, NULL);
|
2010-12-17 18:51:22 -03:00
|
|
|
set_ps_display("<FASTPATH>", false);
|
2001-06-22 19:16:24 +00:00
|
|
|
|
1997-09-08 02:41:22 +00:00
|
|
|
/* start an xact for this function invocation */
|
2006-06-20 22:52:00 +00:00
|
|
|
start_xact_command();
|
1998-10-02 01:14:14 +00:00
|
|
|
|
2006-06-11 15:49:28 +00:00
|
|
|
/*
|
|
|
|
* Note: we may at this point be inside an aborted
|
2006-10-04 00:30:14 +00:00
|
|
|
* transaction. We can't throw error for that until we've
|
|
|
|
* finished reading the function-call message, so
|
2006-06-11 15:49:28 +00:00
|
|
|
* HandleFunctionRequest() must check for it after doing so.
|
|
|
|
* Be careful not to do anything that assumes we're inside a
|
|
|
|
* valid transaction here.
|
|
|
|
*/
|
|
|
|
|
2003-05-09 18:08:48 +00:00
|
|
|
/* switch back to message context */
|
|
|
|
MemoryContextSwitchTo(MessageContext);
|
|
|
|
|
2017-04-06 09:09:39 +03:00
|
|
|
HandleFunctionRequest(&input_message);
|
2000-10-07 00:58:23 +00:00
|
|
|
|
|
|
|
/* commit the function-invocation transaction */
|
2006-06-20 22:52:00 +00:00
|
|
|
finish_xact_command();
|
1997-09-08 02:41:22 +00:00
|
|
|
|
2005-12-30 23:49:48 +00:00
|
|
|
send_ready_for_query = true;
|
1997-09-08 02:41:22 +00:00
|
|
|
break;
|
|
|
|
|
2003-08-04 00:43:34 +00:00
|
|
|
case 'C': /* close */
|
2003-05-05 00:44:56 +00:00
|
|
|
{
|
2003-08-04 00:43:34 +00:00
|
|
|
int close_type;
|
2003-05-05 00:44:56 +00:00
|
|
|
const char *close_target;
|
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
forbidden_in_wal_sender(firstchar);
|
|
|
|
|
2003-08-12 18:52:38 +00:00
|
|
|
close_type = pq_getmsgbyte(&input_message);
|
|
|
|
close_target = pq_getmsgstring(&input_message);
|
|
|
|
pq_getmsgend(&input_message);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
switch (close_type)
|
|
|
|
{
|
|
|
|
case 'S':
|
|
|
|
if (close_target[0] != '\0')
|
|
|
|
DropPreparedStatement(close_target, false);
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* special-case the unnamed statement */
|
2007-03-13 00:33:44 +00:00
|
|
|
drop_unnamed_stmt();
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 'P':
|
|
|
|
{
|
|
|
|
Portal portal;
|
|
|
|
|
|
|
|
portal = GetPortalByName(close_target);
|
|
|
|
if (PortalIsValid(portal))
|
|
|
|
PortalDrop(portal, false);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
2005-10-15 02:49:52 +00:00
|
|
|
errmsg("invalid CLOSE message subtype %d",
|
|
|
|
close_type)));
|
2003-05-05 00:44:56 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput == DestRemote)
|
2003-08-04 00:43:34 +00:00
|
|
|
pq_putemptymessage('3'); /* CloseComplete */
|
2003-05-05 00:44:56 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'D': /* describe */
|
|
|
|
{
|
2003-08-04 00:43:34 +00:00
|
|
|
int describe_type;
|
2003-05-05 00:44:56 +00:00
|
|
|
const char *describe_target;
|
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
forbidden_in_wal_sender(firstchar);
|
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
/* Set statement_timestamp() (needed for xact) */
|
|
|
|
SetCurrentStatementStartTimestamp();
|
|
|
|
|
2003-08-12 18:52:38 +00:00
|
|
|
describe_type = pq_getmsgbyte(&input_message);
|
|
|
|
describe_target = pq_getmsgstring(&input_message);
|
|
|
|
pq_getmsgend(&input_message);
|
2003-05-05 00:44:56 +00:00
|
|
|
|
|
|
|
switch (describe_type)
|
|
|
|
{
|
|
|
|
case 'S':
|
|
|
|
exec_describe_statement_message(describe_target);
|
|
|
|
break;
|
|
|
|
case 'P':
|
|
|
|
exec_describe_portal_message(describe_target);
|
|
|
|
break;
|
|
|
|
default:
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
2005-10-15 02:49:52 +00:00
|
|
|
errmsg("invalid DESCRIBE message subtype %d",
|
|
|
|
describe_type)));
|
2003-05-05 00:44:56 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2003-08-04 00:43:34 +00:00
|
|
|
case 'H': /* flush */
|
2003-08-12 18:52:38 +00:00
|
|
|
pq_getmsgend(&input_message);
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput == DestRemote)
|
2003-05-05 00:44:56 +00:00
|
|
|
pq_flush();
|
|
|
|
break;
|
|
|
|
|
2003-08-04 00:43:34 +00:00
|
|
|
case 'S': /* sync */
|
2003-08-12 18:52:38 +00:00
|
|
|
pq_getmsgend(&input_message);
|
2006-06-20 22:52:00 +00:00
|
|
|
finish_xact_command();
|
2005-12-30 23:49:48 +00:00
|
|
|
send_ready_for_query = true;
|
2003-05-05 00:44:56 +00:00
|
|
|
break;
|
|
|
|
|
2002-03-04 01:46:04 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* 'X' means that the frontend is closing down the socket. EOF
|
|
|
|
* means unexpected loss of frontend connection. Either way,
|
|
|
|
* perform normal shutdown.
|
1997-09-08 02:41:22 +00:00
|
|
|
*/
|
|
|
|
case 'X':
|
1999-07-22 02:40:07 +00:00
|
|
|
case EOF:
|
2002-09-04 20:31:48 +00:00
|
|
|
|
2002-03-04 01:46:04 +00:00
|
|
|
/*
|
2005-10-15 02:49:52 +00:00
|
|
|
* Reset whereToSendOutput to prevent ereport from attempting
|
|
|
|
* to send any more messages to client.
|
2002-03-04 01:46:04 +00:00
|
|
|
*/
|
2005-11-03 17:11:40 +00:00
|
|
|
if (whereToSendOutput == DestRemote)
|
|
|
|
whereToSendOutput = DestNone;
|
2001-03-22 04:01:46 +00:00
|
|
|
|
2000-12-18 00:44:50 +00:00
|
|
|
/*
|
|
|
|
* NOTE: if you are tempted to add more code here, DON'T!
|
2001-03-22 04:01:46 +00:00
|
|
|
* Whatever you had in mind to do should be set up as an
|
2005-10-15 02:49:52 +00:00
|
|
|
* on_proc_exit or on_shmem_exit callback, instead. Otherwise
|
|
|
|
* it will fail to be called during other backend-shutdown
|
|
|
|
* scenarios.
|
2000-12-18 00:44:50 +00:00
|
|
|
*/
|
2004-07-28 22:05:47 +00:00
|
|
|
proc_exit(0);
|
1997-09-08 02:41:22 +00:00
|
|
|
|
2003-08-04 00:43:34 +00:00
|
|
|
case 'd': /* copy data */
|
|
|
|
case 'c': /* copy done */
|
|
|
|
case 'f': /* copy fail */
|
|
|
|
|
2003-04-19 00:02:30 +00:00
|
|
|
/*
|
2003-08-04 00:43:34 +00:00
|
|
|
* Accept but ignore these messages, per protocol spec; we
|
2005-10-15 02:49:52 +00:00
|
|
|
* probably got here because a COPY failed, and the frontend
|
|
|
|
* is still sending data.
|
2003-04-19 00:02:30 +00:00
|
|
|
*/
|
|
|
|
break;
|
|
|
|
|
1997-09-08 02:41:22 +00:00
|
|
|
default:
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(FATAL,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
|
|
|
errmsg("invalid frontend message type %d",
|
|
|
|
firstchar)));
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
2000-10-07 00:58:23 +00:00
|
|
|
} /* end of input-reading loop */
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2012-10-05 17:13:07 +03:00
|
|
|
/*
|
|
|
|
* Throw an error if we're a WAL sender process.
|
|
|
|
*
|
|
|
|
* This is used to forbid anything else than simple query protocol messages
|
|
|
|
* in a WAL sender process. 'firstchar' specifies what kind of a forbidden
|
|
|
|
* message was received, and is used to construct the error message.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
forbidden_in_wal_sender(char firstchar)
|
|
|
|
{
|
|
|
|
if (am_walsender)
|
|
|
|
{
|
|
|
|
if (firstchar == 'F')
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
|
|
|
errmsg("fastpath function calls not supported in a replication connection")));
|
|
|
|
else
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_PROTOCOL_VIOLATION),
|
|
|
|
errmsg("extended query protocol not supported in a replication connection")));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-10-07 19:25:29 +00:00
|
|
|
|
|
|
|
/*
 * Obtain platform stack depth limit (in bytes)
 *
 * Return -1 if unknown
 */
long
get_stack_depth_rlimit(void)
{
#if defined(HAVE_GETRLIMIT) && defined(RLIMIT_STACK)
	static long cached_limit = 0;

	/* This won't change after process launch, so compute it just once */
	if (cached_limit == 0)
	{
		struct rlimit rlim;

		if (getrlimit(RLIMIT_STACK, &rlim) < 0)
			cached_limit = -1;
		/*
		 * Treat "unlimited" as LONG_MAX; also clamp there, since rlim_cur is
		 * probably of an unsigned type and could overflow a long.
		 */
		else if (rlim.rlim_cur == RLIM_INFINITY || rlim.rlim_cur >= LONG_MAX)
			cached_limit = LONG_MAX;
		else
			cached_limit = rlim.rlim_cur;
	}
	return cached_limit;
#else							/* no getrlimit */
#if defined(WIN32) || defined(__CYGWIN__)
	/* On Windows we set the backend stack size in src/backend/Makefile */
	return WIN32_STACK_RLIMIT;
#else							/* not windows ... give up */
	return -1;
#endif
#endif
}
|
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
|
2003-10-19 23:43:51 +00:00
|
|
|
static struct rusage Save_r;
|
|
|
|
static struct timeval Save_t;
|
1996-07-09 06:22:35 +00:00
|
|
|
|
|
|
|
void
|
1996-11-10 03:06:38 +00:00
|
|
|
ResetUsage(void)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
1997-09-07 05:04:48 +00:00
|
|
|
getrusage(RUSAGE_SELF, &Save_r);
|
2002-10-24 23:19:13 +00:00
|
|
|
gettimeofday(&Save_t, NULL);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2001-11-10 23:51:14 +00:00
|
|
|
ShowUsage(const char *title)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2001-11-10 23:51:14 +00:00
|
|
|
StringInfoData str;
|
1997-09-08 02:41:22 +00:00
|
|
|
struct timeval user,
|
|
|
|
sys;
|
|
|
|
struct timeval elapse_t;
|
|
|
|
struct rusage r;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
getrusage(RUSAGE_SELF, &r);
|
2002-10-24 23:19:13 +00:00
|
|
|
gettimeofday(&elapse_t, NULL);
|
2001-11-10 23:51:14 +00:00
|
|
|
memcpy((char *) &user, (char *) &r.ru_utime, sizeof(user));
|
|
|
|
memcpy((char *) &sys, (char *) &r.ru_stime, sizeof(sys));
|
1997-09-07 05:04:48 +00:00
|
|
|
if (elapse_t.tv_usec < Save_t.tv_usec)
|
|
|
|
{
|
|
|
|
elapse_t.tv_sec--;
|
|
|
|
elapse_t.tv_usec += 1000000;
|
|
|
|
}
|
|
|
|
if (r.ru_utime.tv_usec < Save_r.ru_utime.tv_usec)
|
|
|
|
{
|
|
|
|
r.ru_utime.tv_sec--;
|
|
|
|
r.ru_utime.tv_usec += 1000000;
|
|
|
|
}
|
|
|
|
if (r.ru_stime.tv_usec < Save_r.ru_stime.tv_usec)
|
|
|
|
{
|
|
|
|
r.ru_stime.tv_sec--;
|
|
|
|
r.ru_stime.tv_usec += 1000000;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* the only stats we don't show here are for memory usage -- i can't
|
2005-10-15 02:49:52 +00:00
|
|
|
* figure out how to interpret the relevant fields in the rusage struct,
|
|
|
|
* and they change names across o/s platforms, anyway. if you can figure
|
|
|
|
* out what the entries mean, you can somehow extract resident set size,
|
|
|
|
* shared text size, and unshared data and stack sizes.
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
2001-11-10 23:51:14 +00:00
|
|
|
initStringInfo(&str);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2013-10-31 10:55:59 -04:00
|
|
|
appendStringInfoString(&str, "! system usage stats:\n");
|
2001-11-10 23:51:14 +00:00
|
|
|
appendStringInfo(&str,
|
Account for catalog snapshot in PGXACT->xmin updates.
The CatalogSnapshot was not plugged into SnapshotResetXmin()'s accounting
for whether MyPgXact->xmin could be cleared or advanced. In normal
transactions this was masked by the fact that the transaction snapshot
would be older, but during backend startup and certain utility commands
it was possible to re-use the CatalogSnapshot after MyPgXact->xmin had
been cleared, meaning that recently-deleted rows could be pruned even
though this snapshot could still see them, causing unexpected catalog
lookup failures. This effect appears to be the explanation for a recent
failure on buildfarm member piculet.
To fix, add the CatalogSnapshot to the RegisteredSnapshots heap whenever
it is valid.
In the previous logic, it was possible for the CatalogSnapshot to remain
valid across waits for client input, but with this change that would mean
it delays advance of global xmin in cases where it did not before. To
avoid possibly causing new table-bloat problems with clients that sit idle
for long intervals, add code to invalidate the CatalogSnapshot before
waiting for client input. (When the backend is busy, it's unlikely that
the CatalogSnapshot would be the oldest snap for very long, so we don't
worry about forcing early invalidation of it otherwise.)
In passing, remove the CatalogSnapshotStale flag in favor of using
"CatalogSnapshot != NULL" to represent validity, as we do for the other
special snapshots in snapmgr.c. And improve some obsolete comments.
No regression test because I don't know a deterministic way to cause this
failure. But the stress test shown in the original discussion provokes
"cache lookup failed for relation 1255" within a few dozen seconds for me.
Back-patch to 9.4 where MVCC catalog scans were introduced. (Note: it's
quite easy to produce similar failures with the same test case in branches
before 9.4. But MVCC catalog scans were supposed to fix that.)
Discussion: <16447.1478818294@sss.pgh.pa.us>
2016-11-15 15:55:35 -05:00
|
|
|
"!\t%ld.%06ld s user, %ld.%06ld s system, %ld.%06ld s elapsed\n",
|
2002-10-24 23:19:13 +00:00
|
|
|
(long) (r.ru_utime.tv_sec - Save_r.ru_utime.tv_sec),
|
2005-10-15 02:49:52 +00:00
|
|
|
(long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
|
2002-10-24 23:19:13 +00:00
|
|
|
(long) (r.ru_stime.tv_sec - Save_r.ru_stime.tv_sec),
|
2016-10-19 12:00:00 -04:00
|
|
|
(long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec),
|
|
|
|
(long) (elapse_t.tv_sec - Save_t.tv_sec),
|
|
|
|
(long) (elapse_t.tv_usec - Save_t.tv_usec));
|
2001-11-10 23:51:14 +00:00
|
|
|
appendStringInfo(&str,
|
2016-10-19 12:00:00 -04:00
|
|
|
"!\t[%ld.%06ld s user, %ld.%06ld s system total]\n",
|
2002-10-24 23:19:13 +00:00
|
|
|
(long) user.tv_sec,
|
|
|
|
(long) user.tv_usec,
|
|
|
|
(long) sys.tv_sec,
|
|
|
|
(long) sys.tv_usec);
|
2000-12-18 18:45:05 +00:00
|
|
|
#if defined(HAVE_GETRUSAGE)
|
2001-11-10 23:51:14 +00:00
|
|
|
appendStringInfo(&str,
|
2002-09-04 20:31:48 +00:00
|
|
|
"!\t%ld/%ld [%ld/%ld] filesystem blocks in/out\n",
|
|
|
|
r.ru_inblock - Save_r.ru_inblock,
|
1997-09-07 05:04:48 +00:00
|
|
|
/* they only drink coffee at dec */
|
2002-09-04 20:31:48 +00:00
|
|
|
r.ru_oublock - Save_r.ru_oublock,
|
|
|
|
r.ru_inblock, r.ru_oublock);
|
2001-11-10 23:51:14 +00:00
|
|
|
appendStringInfo(&str,
|
2005-10-15 02:49:52 +00:00
|
|
|
"!\t%ld/%ld [%ld/%ld] page faults/reclaims, %ld [%ld] swaps\n",
|
2002-09-04 20:31:48 +00:00
|
|
|
r.ru_majflt - Save_r.ru_majflt,
|
|
|
|
r.ru_minflt - Save_r.ru_minflt,
|
|
|
|
r.ru_majflt, r.ru_minflt,
|
|
|
|
r.ru_nswap - Save_r.ru_nswap,
|
|
|
|
r.ru_nswap);
|
2001-11-10 23:51:14 +00:00
|
|
|
appendStringInfo(&str,
|
2005-10-15 02:49:52 +00:00
|
|
|
"!\t%ld [%ld] signals rcvd, %ld/%ld [%ld/%ld] messages rcvd/sent\n",
|
2002-09-04 20:31:48 +00:00
|
|
|
r.ru_nsignals - Save_r.ru_nsignals,
|
|
|
|
r.ru_nsignals,
|
|
|
|
r.ru_msgrcv - Save_r.ru_msgrcv,
|
|
|
|
r.ru_msgsnd - Save_r.ru_msgsnd,
|
|
|
|
r.ru_msgrcv, r.ru_msgsnd);
|
2001-11-10 23:51:14 +00:00
|
|
|
appendStringInfo(&str,
|
2005-10-15 02:49:52 +00:00
|
|
|
"!\t%ld/%ld [%ld/%ld] voluntary/involuntary context switches\n",
|
2002-09-04 20:31:48 +00:00
|
|
|
r.ru_nvcsw - Save_r.ru_nvcsw,
|
|
|
|
r.ru_nivcsw - Save_r.ru_nivcsw,
|
|
|
|
r.ru_nvcsw, r.ru_nivcsw);
|
2001-11-05 17:46:40 +00:00
|
|
|
#endif /* HAVE_GETRUSAGE */
|
2001-11-10 23:51:14 +00:00
|
|
|
|
|
|
|
/* remove trailing newline */
|
2002-09-04 20:31:48 +00:00
|
|
|
if (str.data[str.len - 1] == '\n')
|
2001-11-10 23:51:14 +00:00
|
|
|
str.data[--str.len] = '\0';
|
|
|
|
|
2003-07-22 19:00:12 +00:00
|
|
|
ereport(LOG,
|
|
|
|
(errmsg_internal("%s", title),
|
2011-07-16 14:21:12 -04:00
|
|
|
errdetail_internal("%s", str.data)));
|
2001-11-10 23:51:14 +00:00
|
|
|
|
|
|
|
pfree(str.data);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
2004-02-17 03:54:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* on_proc_exit handler to log end of session
|
|
|
|
*/
|
2004-08-29 05:07:03 +00:00
|
|
|
static void
|
2004-02-17 04:09:26 +00:00
|
|
|
log_disconnections(int code, Datum arg)
|
2004-02-17 03:54:57 +00:00
|
|
|
{
|
2005-10-15 02:49:52 +00:00
|
|
|
Port *port = MyProcPort;
|
2006-06-20 22:52:00 +00:00
|
|
|
long secs;
|
|
|
|
int usecs;
|
|
|
|
int msecs;
|
2005-10-15 02:49:52 +00:00
|
|
|
int hours,
|
|
|
|
minutes,
|
|
|
|
seconds;
|
2004-02-17 03:54:57 +00:00
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
TimestampDifference(port->SessionStartTime,
|
|
|
|
GetCurrentTimestamp(),
|
|
|
|
&secs, &usecs);
|
|
|
|
msecs = usecs / 1000;
|
2004-02-17 03:54:57 +00:00
|
|
|
|
2006-06-20 22:52:00 +00:00
|
|
|
hours = secs / SECS_PER_HOUR;
|
|
|
|
secs %= SECS_PER_HOUR;
|
|
|
|
minutes = secs / SECS_PER_MINUTE;
|
|
|
|
seconds = secs % SECS_PER_MINUTE;
|
2004-02-17 03:54:57 +00:00
|
|
|
|
2005-10-05 23:46:06 +00:00
|
|
|
ereport(LOG,
|
2006-06-20 22:52:00 +00:00
|
|
|
(errmsg("disconnection: session time: %d:%02d:%02d.%03d "
|
2005-10-05 23:46:06 +00:00
|
|
|
"user=%s database=%s host=%s%s%s",
|
2006-06-20 22:52:00 +00:00
|
|
|
hours, minutes, seconds, msecs,
|
2005-10-05 23:46:06 +00:00
|
|
|
port->user_name, port->database_name, port->remote_host,
|
2006-10-04 00:30:14 +00:00
|
|
|
port->remote_port[0] ? " port=" : "", port->remote_port)));
|
2004-02-17 03:54:57 +00:00
|
|
|
}
|