2010-05-12 02:19:11 +00:00
|
|
|
/*
|
|
|
|
* version.c
|
|
|
|
*
|
|
|
|
* Postgres-version-specific routines
|
2010-07-03 14:23:14 +00:00
|
|
|
*
|
2014-01-07 16:05:30 -05:00
|
|
|
* Copyright (c) 2010-2014, PostgreSQL Global Development Group
|
2010-09-20 22:08:53 +02:00
|
|
|
* contrib/pg_upgrade/version.c
|
2010-05-12 02:19:11 +00:00
|
|
|
*/
|
|
|
|
|
Create libpgcommon, and move pg_malloc et al to it
libpgcommon is a new static library to allow sharing code among the
various frontend programs and backend; this lets us eliminate duplicate
implementations of common routines. We avoid libpgport, because that's
intended as a place for porting issues; per discussion, it seems better
to keep them separate.
The first use case, and the only implemented by this patch, is pg_malloc
and friends, which many frontend programs were already using.
At the same time, we can use this to provide palloc emulation functions
for the frontend; this way, some palloc-using files in the backend can
also be used by the frontend cleanly. To do this, we change palloc() in
the backend to be a function instead of a macro on top of
MemoryContextAlloc(). This was previously believed to cause loss of
performance, but this implementation has been tweaked by Tom and Andres
so that on modern compilers it provides a slight improvement over the
previous one.
This lets us clean up some places that were already working around the
problem with localized hacks.
Most of the pg_malloc/palloc changes in this patch were authored by
Andres Freund. Zoltán Böszörményi also independently provided a form of
that. libpgcommon infrastructure was authored by Álvaro.
2013-02-12 10:33:40 -03:00
|
|
|
#include "postgres_fe.h"
|
2011-08-26 21:16:24 -04:00
|
|
|
|
2010-05-12 02:19:11 +00:00
|
|
|
#include "pg_upgrade.h"
|
|
|
|
|
2019-10-16 16:08:40 +02:00
|
|
|
#include "catalog/pg_class.h"
|
|
|
|
|
2010-05-12 02:19:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* new_9_0_populate_pg_largeobject_metadata()
|
|
|
|
* new >= 9.0, old <= 8.4
|
|
|
|
* 9.0 has a new pg_largeobject permission table
|
|
|
|
*/
|
|
|
|
void
|
2011-01-01 12:06:36 -05:00
|
|
|
new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode)
|
2010-05-12 02:19:11 +00:00
|
|
|
{
|
|
|
|
int dbnum;
|
|
|
|
FILE *script = NULL;
|
|
|
|
bool found = false;
|
|
|
|
char output_path[MAXPGPATH];
|
|
|
|
|
2010-10-19 21:38:16 +00:00
|
|
|
prep_status("Checking for large objects");
|
2010-05-12 02:19:11 +00:00
|
|
|
|
2012-03-12 19:47:54 -04:00
|
|
|
snprintf(output_path, sizeof(output_path), "pg_largeobject.sql");
|
2010-05-12 02:19:11 +00:00
|
|
|
|
2011-01-01 12:06:36 -05:00
|
|
|
for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
|
2010-05-12 02:19:11 +00:00
|
|
|
{
|
|
|
|
PGresult *res;
|
|
|
|
int i_count;
|
2011-01-01 12:06:36 -05:00
|
|
|
DbInfo *active_db = &cluster->dbarr.dbs[dbnum];
|
|
|
|
PGconn *conn = connectToServer(cluster, active_db->db_name);
|
2010-05-12 02:19:11 +00:00
|
|
|
|
|
|
|
/* find if there are any large objects */
|
2010-10-19 21:38:16 +00:00
|
|
|
res = executeQueryOrDie(conn,
|
2010-05-12 02:19:11 +00:00
|
|
|
"SELECT count(*) "
|
|
|
|
"FROM pg_catalog.pg_largeobject ");
|
|
|
|
|
|
|
|
i_count = PQfnumber(res, "count");
|
|
|
|
if (atoi(PQgetvalue(res, 0, i_count)) != 0)
|
|
|
|
{
|
|
|
|
found = true;
|
|
|
|
if (!check_mode)
|
|
|
|
{
|
2016-08-08 10:07:46 -04:00
|
|
|
PQExpBufferData connectbuf;
|
|
|
|
|
2012-03-12 19:47:54 -04:00
|
|
|
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
|
2015-11-24 17:18:28 -05:00
|
|
|
pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText());
|
2016-08-08 10:07:46 -04:00
|
|
|
|
|
|
|
initPQExpBuffer(&connectbuf);
|
|
|
|
appendPsqlMetaConnect(&connectbuf, active_db->db_name);
|
|
|
|
fputs(connectbuf.data, script);
|
|
|
|
termPQExpBuffer(&connectbuf);
|
|
|
|
|
2010-05-12 02:19:11 +00:00
|
|
|
fprintf(script,
|
|
|
|
"SELECT pg_catalog.lo_create(t.loid)\n"
|
|
|
|
"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
PQfinish(conn);
|
|
|
|
}
|
|
|
|
|
2011-03-08 21:35:42 -05:00
|
|
|
if (script)
|
|
|
|
fclose(script);
|
|
|
|
|
2010-05-12 02:19:11 +00:00
|
|
|
if (found)
|
|
|
|
{
|
2010-10-19 21:38:16 +00:00
|
|
|
report_status(PG_WARNING, "warning");
|
2010-05-12 02:19:11 +00:00
|
|
|
if (check_mode)
|
2010-10-19 21:38:16 +00:00
|
|
|
pg_log(PG_WARNING, "\n"
|
2011-07-12 07:13:51 +03:00
|
|
|
"Your installation contains large objects. The new database has an\n"
|
|
|
|
"additional large object permission table. After upgrading, you will be\n"
|
|
|
|
"given a command to populate the pg_largeobject permission table with\n"
|
|
|
|
"default permissions.\n\n");
|
2010-05-12 02:19:11 +00:00
|
|
|
else
|
2010-10-19 21:38:16 +00:00
|
|
|
pg_log(PG_WARNING, "\n"
|
2011-07-12 07:13:51 +03:00
|
|
|
"Your installation contains large objects. The new database has an\n"
|
|
|
|
"additional large object permission table, so default permissions must be\n"
|
|
|
|
"defined for all large objects. The file\n"
|
|
|
|
" %s\n"
|
|
|
|
"when executed by psql by the database superuser will set the default\n"
|
|
|
|
"permissions.\n\n",
|
2010-05-12 02:19:11 +00:00
|
|
|
output_path);
|
|
|
|
}
|
|
|
|
else
|
2010-10-19 21:38:16 +00:00
|
|
|
check_ok();
|
2010-05-12 02:19:11 +00:00
|
|
|
}
|
2014-05-14 16:26:06 -04:00
|
|
|
|
|
|
|
|
|
|
|
/*
 * check_for_data_type_usage
 *	Detect whether there are any stored columns depending on the given type
 *
 * If so, write a report to the given file name, and return true.
 *
 * We check for the type in tables, matviews, and indexes, but not views;
 * there's no storage involved in a view.
 *
 * NOTE(review): typename is interpolated directly into the query below, so
 * callers are assumed to pass a trusted, schema-qualified type name (e.g.
 * "pg_catalog.line") -- confirm against callers.  output_path is only
 * opened/written if at least one offending column is found.
 */
static bool
check_for_data_type_usage(ClusterInfo *cluster, const char *typename,
						  char *output_path)
{
	bool		found = false;
	FILE	   *script = NULL;		/* report file; opened lazily on first hit */
	int			dbnum;

	/* run the check against every database in the cluster */
	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
		PQExpBufferData querybuf;
		PGresult   *res;
		bool		db_used = false;	/* has this db's header been printed? */
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;

		/*
		 * The type of interest might be wrapped in a domain, array,
		 * composite, or range, and these container types can be nested (to
		 * varying extents depending on server version, but that's not of
		 * concern here). To handle all these cases we need a recursive CTE.
		 */
		initPQExpBuffer(&querybuf);
		appendPQExpBuffer(&querybuf,
						  "WITH RECURSIVE oids AS ( "
		/* the target type itself */
						  " SELECT '%s'::pg_catalog.regtype AS oid "
						  " UNION ALL "
						  " SELECT * FROM ( "
		/* inner WITH because we can only reference the CTE once */
						  " WITH x AS (SELECT oid FROM oids) "
		/* domains on any type selected so far */
						  " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
						  " UNION ALL "
		/* arrays over any type selected so far */
						  " SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
						  " UNION ALL "
		/* composite types containing any type selected so far */
						  " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
						  " WHERE t.typtype = 'c' AND "
						  " t.oid = c.reltype AND "
						  " c.oid = a.attrelid AND "
						  " NOT a.attisdropped AND "
						  " a.atttypid = x.oid ",
						  typename);

		/* Ranges came in in 9.2 */
		if (GET_MAJOR_VERSION(cluster->major_version) >= 902)
			appendPQExpBuffer(&querybuf,
							  " UNION ALL "
			/* ranges containing any type selected so far */
							  " SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
							  " WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid");

		appendPQExpBuffer(&querybuf,
						  " ) foo "
						  ") "
		/* now look for stored columns of any such type */
						  "SELECT n.nspname, c.relname, a.attname "
						  "FROM pg_catalog.pg_class c, "
						  " pg_catalog.pg_namespace n, "
						  " pg_catalog.pg_attribute a "
						  "WHERE c.oid = a.attrelid AND "
						  " NOT a.attisdropped AND "
						  " a.atttypid IN (SELECT oid FROM oids) AND "
						  " c.relkind IN ("
						  CppAsString2(RELKIND_RELATION) ", "
						  CppAsString2(RELKIND_MATVIEW) ", "
						  CppAsString2(RELKIND_INDEX) ") AND "
						  " c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
						  " n.nspname !~ '^pg_temp_' AND "
						  " n.nspname !~ '^pg_toast_temp_' AND "
		/* exclude system catalogs, too */
						  " n.nspname NOT IN ('pg_catalog', 'information_schema')");

		res = executeQueryOrDie(conn, "%s", querybuf.data);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			/* first hit anywhere creates the report file */
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText());
			if (!db_used)
			{
				/* one header line per database with matches */
				fprintf(script, "In database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, " %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		termPQExpBuffer(&querybuf);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	return found;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* old_9_3_check_for_line_data_type_usage()
|
|
|
|
* 9.3 -> 9.4
|
|
|
|
* Fully implement the 'line' data type in 9.4, which previously returned
|
|
|
|
* "not enabled" by default and was only functionally enabled with a
|
|
|
|
* compile-time switch; as of 9.4 "line" has a different on-disk
|
|
|
|
* representation format.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
|
|
|
|
{
|
|
|
|
char output_path[MAXPGPATH];
|
|
|
|
|
|
|
|
prep_status("Checking for invalid \"line\" user columns");
|
|
|
|
|
|
|
|
snprintf(output_path, sizeof(output_path), "tables_using_line.txt");
|
|
|
|
|
|
|
|
if (check_for_data_type_usage(cluster, "pg_catalog.line", output_path))
|
2014-05-14 16:26:06 -04:00
|
|
|
{
|
|
|
|
pg_log(PG_REPORT, "fatal\n");
|
|
|
|
pg_fatal("Your installation contains the \"line\" data type in user tables. This\n"
|
|
|
|
"data type changed its internal and input/output format between your old\n"
|
|
|
|
"and new clusters so this cluster cannot currently be upgraded. You can\n"
|
|
|
|
"remove the problem tables and restart the upgrade. A list of the problem\n"
|
|
|
|
"columns is in the file:\n"
|
|
|
|
" %s\n\n", output_path);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
check_ok();
|
|
|
|
}
|