1996-07-09 06:22:35 +00:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
1999-02-13 23:22:53 +00:00
|
|
|
* rtree.c
|
1997-09-07 05:04:48 +00:00
|
|
|
* interface routines for the postgres rtree indexed access method.
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
2002-06-20 20:29:54 +00:00
|
|
|
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
|
2000-01-26 05:58:53 +00:00
|
|
|
* Portions Copyright (c) 1994, Regents of the University of California
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
|
|
|
*
|
|
|
|
* IDENTIFICATION
|
2003-07-21 20:29:40 +00:00
|
|
|
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.78 2003/07/21 20:29:39 tgl Exp $
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
|
1999-07-15 23:04:24 +00:00
|
|
|
#include "postgres.h"
|
1996-11-05 10:54:20 +00:00
|
|
|
|
1999-07-15 23:04:24 +00:00
|
|
|
#include "access/genam.h"
|
1999-07-16 05:00:38 +00:00
|
|
|
#include "access/heapam.h"
|
1999-07-15 23:04:24 +00:00
|
|
|
#include "access/rtree.h"
|
2001-03-07 21:20:26 +00:00
|
|
|
#include "access/xlogutils.h"
|
1999-07-16 05:00:38 +00:00
|
|
|
#include "catalog/index.h"
|
1999-07-15 23:04:24 +00:00
|
|
|
#include "executor/executor.h"
|
1999-09-18 19:08:25 +00:00
|
|
|
#include "miscadmin.h"
|
1996-11-05 10:54:20 +00:00
|
|
|
|
1996-10-31 08:52:54 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
/*
|
|
|
|
* XXX We assume that all datatypes indexable in rtrees are pass-by-reference.
|
|
|
|
* To fix this, you'd need to improve the IndexTupleGetDatum() macro, and
|
|
|
|
* do something with the various datum-pfreeing code. However, it's not that
|
|
|
|
* unreasonable an assumption in practice.
|
|
|
|
*/
|
|
|
|
#define IndexTupleGetDatum(itup) \
|
|
|
|
PointerGetDatum(((char *) (itup)) + sizeof(IndexTupleData))
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Space-allocation macros. Note we count the item's line pointer in its size.
|
|
|
|
*/
|
|
|
|
#define RTPageAvailSpace \
|
|
|
|
(BLCKSZ - (sizeof(PageHeaderData) - sizeof(ItemIdData)) \
|
|
|
|
- MAXALIGN(sizeof(RTreePageOpaqueData)))
|
|
|
|
#define IndexTupleTotalSize(itup) \
|
|
|
|
(MAXALIGN(IndexTupleSize(itup)) + sizeof(ItemIdData))
|
|
|
|
#define IndexTupleAttSize(itup) \
|
|
|
|
(IndexTupleSize(itup) - sizeof(IndexTupleData))
|
|
|
|
|
|
|
|
/* results of rtpicksplit() */
|
1997-09-07 05:04:48 +00:00
|
|
|
typedef struct SPLITVEC
|
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
OffsetNumber *spl_left;
|
|
|
|
int spl_nleft;
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum spl_ldatum;
|
1997-09-08 02:41:22 +00:00
|
|
|
OffsetNumber *spl_right;
|
|
|
|
int spl_nright;
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum spl_rdatum;
|
1997-09-08 21:56:23 +00:00
|
|
|
} SPLITVEC;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-09-29 03:46:12 +00:00
|
|
|
/* for sorting tuples by cost, for picking split */
|
|
|
|
typedef struct SPLITCOST
|
|
|
|
{
|
2001-10-25 05:50:21 +00:00
|
|
|
OffsetNumber offset_number;
|
|
|
|
float cost_differential;
|
|
|
|
bool choose_left;
|
2001-09-29 03:46:12 +00:00
|
|
|
} SPLITCOST;
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
typedef struct RTSTATE
|
|
|
|
{
|
1998-01-15 19:46:37 +00:00
|
|
|
FmgrInfo unionFn; /* union function */
|
|
|
|
FmgrInfo sizeFn; /* size function */
|
|
|
|
FmgrInfo interFn; /* intersection function */
|
1997-09-08 21:56:23 +00:00
|
|
|
} RTSTATE;
|
1996-07-09 06:22:35 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/* Working state for rtbuild and its callback */
|
|
|
|
typedef struct
|
|
|
|
{
|
|
|
|
RTSTATE rtState;
|
|
|
|
double indtuples;
|
|
|
|
} RTBuildState;
|
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
/* non-export function prototypes */
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
static void rtbuildCallback(Relation index,
|
2001-10-25 05:50:21 +00:00
|
|
|
HeapTuple htup,
|
|
|
|
Datum *attdata,
|
|
|
|
char *nulls,
|
|
|
|
bool tupleIsAlive,
|
|
|
|
void *state);
|
1998-09-01 04:40:42 +00:00
|
|
|
static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup,
|
1997-09-08 21:56:23 +00:00
|
|
|
RTSTATE *rtstate);
|
2001-03-07 21:20:26 +00:00
|
|
|
static void rttighten(Relation r, RTSTACK *stk, Datum datum, int att_size,
|
1997-09-08 21:56:23 +00:00
|
|
|
RTSTATE *rtstate);
|
2001-03-07 21:20:26 +00:00
|
|
|
static InsertIndexResult rtdosplit(Relation r, Buffer buffer, RTSTACK *stack,
|
2001-03-22 04:01:46 +00:00
|
|
|
IndexTuple itup, RTSTATE *rtstate);
|
1998-09-01 04:40:42 +00:00
|
|
|
static void rtintinsert(Relation r, RTSTACK *stk, IndexTuple ltup,
|
1997-09-08 21:56:23 +00:00
|
|
|
IndexTuple rtup, RTSTATE *rtstate);
|
1997-09-08 02:41:22 +00:00
|
|
|
static void rtnewroot(Relation r, IndexTuple lt, IndexTuple rt);
|
2001-03-07 21:20:26 +00:00
|
|
|
static void rtpicksplit(Relation r, Page page, SPLITVEC *v, IndexTuple itup,
|
2001-03-22 04:01:46 +00:00
|
|
|
RTSTATE *rtstate);
|
1997-09-08 02:41:22 +00:00
|
|
|
static void RTInitBuffer(Buffer b, uint32 f);
|
1998-09-01 04:40:42 +00:00
|
|
|
static OffsetNumber choose(Relation r, Page p, IndexTuple it,
|
1997-09-08 21:56:23 +00:00
|
|
|
RTSTATE *rtstate);
|
1997-09-08 02:41:22 +00:00
|
|
|
static int nospace(Page p, IndexTuple it);
|
1997-09-08 21:56:23 +00:00
|
|
|
static void initRtstate(RTSTATE *rtstate, Relation index);
|
2001-10-25 05:50:21 +00:00
|
|
|
static int qsort_comp_splitcost(const void *a, const void *b);
|
1996-07-09 06:22:35 +00:00
|
|
|
|
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/*
|
|
|
|
* routine to build an index. Basically calls insert over and over
|
|
|
|
*/
|
2000-06-13 07:35:40 +00:00
|
|
|
Datum
|
|
|
|
rtbuild(PG_FUNCTION_ARGS)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2001-03-22 04:01:46 +00:00
|
|
|
Relation heap = (Relation) PG_GETARG_POINTER(0);
|
|
|
|
Relation index = (Relation) PG_GETARG_POINTER(1);
|
|
|
|
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
double reltuples;
|
|
|
|
RTBuildState buildstate;
|
|
|
|
Buffer buffer;
|
2001-03-22 04:01:46 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/* no locking is needed */
|
1997-09-07 05:04:48 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
initRtstate(&buildstate.rtState, index);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We expect to be called exactly once for any index relation. If
|
|
|
|
* that's not the case, big trouble's what we have.
|
|
|
|
*/
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
if (RelationGetNumberOfBlocks(index) != 0)
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "index \"%s\" already contains data",
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
RelationGetRelationName(index));
|
2000-07-14 22:18:02 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/* initialize the root page */
|
|
|
|
buffer = ReadBuffer(index, P_NEW);
|
|
|
|
RTInitBuffer(buffer, F_LEAF);
|
|
|
|
WriteBuffer(buffer);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/* build the index */
|
|
|
|
buildstate.indtuples = 0;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/* do the heap scan */
|
|
|
|
reltuples = IndexBuildHeapScan(heap, index, indexInfo,
|
|
|
|
rtbuildCallback, (void *) &buildstate);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/* okay, all heap tuples are indexed */
|
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
/*
|
1997-09-07 05:04:48 +00:00
|
|
|
* Since we just counted the tuples in the heap, we update its stats
|
1999-09-18 19:08:25 +00:00
|
|
|
* in pg_class to guarantee that the planner takes advantage of the
|
2000-04-12 17:17:23 +00:00
|
|
|
* index we just created. But, only update statistics during normal
|
|
|
|
* index definitions, not for indices on system catalogs created
|
|
|
|
* during bootstrap processing. We must close the relations before
|
|
|
|
* updating statistics to guarantee that the relcache entries are
|
|
|
|
* flushed when we increment the command counter in UpdateStats(). But
|
|
|
|
* we do not release any locks on the relations; those will be held
|
|
|
|
* until end of transaction.
|
1996-07-09 06:22:35 +00:00
|
|
|
*/
|
1999-09-18 19:08:25 +00:00
|
|
|
if (IsNormalProcessingMode())
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2000-04-12 17:17:23 +00:00
|
|
|
Oid hrelid = RelationGetRelid(heap);
|
|
|
|
Oid irelid = RelationGetRelid(index);
|
1999-09-18 19:08:25 +00:00
|
|
|
|
|
|
|
heap_close(heap, NoLock);
|
|
|
|
index_close(index);
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
UpdateStats(hrelid, reltuples);
|
|
|
|
UpdateStats(irelid, buildstate.indtuples);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2000-06-14 05:24:50 +00:00
|
|
|
PG_RETURN_VOID();
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/*
|
|
|
|
* Per-tuple callback from IndexBuildHeapScan
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
rtbuildCallback(Relation index,
|
|
|
|
HeapTuple htup,
|
|
|
|
Datum *attdata,
|
|
|
|
char *nulls,
|
|
|
|
bool tupleIsAlive,
|
|
|
|
void *state)
|
|
|
|
{
|
2001-10-25 05:50:21 +00:00
|
|
|
RTBuildState *buildstate = (RTBuildState *) state;
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
IndexTuple itup;
|
|
|
|
InsertIndexResult res;
|
|
|
|
|
|
|
|
/* form an index tuple and point it at the heap tuple */
|
|
|
|
itup = index_formtuple(RelationGetDescr(index), attdata, nulls);
|
|
|
|
itup->t_tid = htup->t_self;
|
|
|
|
|
|
|
|
/* rtree indexes don't index nulls, see notes in rtinsert */
|
|
|
|
if (IndexTupleHasNulls(itup))
|
|
|
|
{
|
|
|
|
pfree(itup);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-10-25 05:50:21 +00:00
|
|
|
* Since we already have the index relation locked, we call rtdoinsert
|
|
|
|
* directly. Normal access method calls dispatch through rtinsert,
|
|
|
|
* which locks the relation for write. This is the right thing to do
|
|
|
|
* if you're inserting single tups, but not when you're initializing
|
|
|
|
* the whole index at once.
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
*/
|
|
|
|
res = rtdoinsert(index, itup, &buildstate->rtState);
|
|
|
|
|
|
|
|
if (res)
|
|
|
|
pfree(res);
|
|
|
|
|
|
|
|
buildstate->indtuples += 1;
|
|
|
|
|
|
|
|
pfree(itup);
|
|
|
|
}
|
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
/*
|
1997-09-07 05:04:48 +00:00
|
|
|
* rtinsert -- wrapper for rtree tuple insertion.
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
1997-09-07 05:04:48 +00:00
|
|
|
* This is the public interface routine for tuple insertion in rtrees.
|
|
|
|
* It doesn't do any work; just locks the relation and passes the buck.
|
1996-07-09 06:22:35 +00:00
|
|
|
*/
|
2000-06-13 07:35:40 +00:00
|
|
|
Datum
|
|
|
|
rtinsert(PG_FUNCTION_ARGS)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2001-03-22 04:01:46 +00:00
|
|
|
Relation r = (Relation) PG_GETARG_POINTER(0);
|
|
|
|
Datum *datum = (Datum *) PG_GETARG_POINTER(1);
|
|
|
|
char *nulls = (char *) PG_GETARG_POINTER(2);
|
|
|
|
ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
|
2002-09-04 20:31:48 +00:00
|
|
|
|
2000-06-13 07:35:40 +00:00
|
|
|
#ifdef NOT_USED
|
2001-03-22 04:01:46 +00:00
|
|
|
Relation heapRel = (Relation) PG_GETARG_POINTER(4);
|
2002-05-24 18:57:57 +00:00
|
|
|
bool checkUnique = PG_GETARG_BOOL(5);
|
2000-06-13 07:35:40 +00:00
|
|
|
#endif
|
1997-09-07 05:04:48 +00:00
|
|
|
InsertIndexResult res;
|
1997-09-08 02:41:22 +00:00
|
|
|
IndexTuple itup;
|
|
|
|
RTSTATE rtState;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/* generate an index tuple */
|
1998-09-01 03:29:17 +00:00
|
|
|
itup = index_formtuple(RelationGetDescr(r), datum, nulls);
|
1997-09-07 05:04:48 +00:00
|
|
|
itup->t_tid = *ht_ctid;
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Currently, rtrees do not support indexing NULLs; considerable
|
|
|
|
* infrastructure work would have to be done to do anything reasonable
|
|
|
|
* with a NULL.
|
|
|
|
*/
|
|
|
|
if (IndexTupleHasNulls(itup))
|
|
|
|
{
|
|
|
|
pfree(itup);
|
|
|
|
PG_RETURN_POINTER((InsertIndexResult) NULL);
|
|
|
|
}
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
initRtstate(&rtState, r);
|
|
|
|
|
1998-12-15 12:47:01 +00:00
|
|
|
/*
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
* Since rtree is not marked "amconcurrent" in pg_am, caller should
|
2001-10-25 05:50:21 +00:00
|
|
|
* have acquired exclusive lock on index relation. We need no locking
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
* here.
|
1998-12-15 12:47:01 +00:00
|
|
|
*/
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
res = rtdoinsert(r, itup, &rtState);
|
|
|
|
|
2000-06-13 07:35:40 +00:00
|
|
|
PG_RETURN_POINTER(res);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
1997-09-08 02:41:22 +00:00
|
|
|
static InsertIndexResult
|
1997-09-08 21:56:23 +00:00
|
|
|
rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
Page page;
|
|
|
|
Buffer buffer;
|
|
|
|
BlockNumber blk;
|
|
|
|
IndexTuple which;
|
|
|
|
OffsetNumber l;
|
|
|
|
RTSTACK *stack;
|
1997-09-07 05:04:48 +00:00
|
|
|
InsertIndexResult res;
|
|
|
|
RTreePageOpaque opaque;
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum datum;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
blk = P_ROOT;
|
|
|
|
buffer = InvalidBuffer;
|
|
|
|
stack = (RTSTACK *) NULL;
|
|
|
|
|
|
|
|
do
|
|
|
|
{
|
|
|
|
/* let go of current buffer before getting next */
|
|
|
|
if (buffer != InvalidBuffer)
|
|
|
|
ReleaseBuffer(buffer);
|
|
|
|
|
|
|
|
/* get next buffer */
|
|
|
|
buffer = ReadBuffer(r, blk);
|
|
|
|
page = (Page) BufferGetPage(buffer);
|
|
|
|
|
|
|
|
opaque = (RTreePageOpaque) PageGetSpecialPointer(page);
|
|
|
|
if (!(opaque->flags & F_LEAF))
|
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
RTSTACK *n;
|
|
|
|
ItemId iid;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
n = (RTSTACK *) palloc(sizeof(RTSTACK));
|
|
|
|
n->rts_parent = stack;
|
|
|
|
n->rts_blk = blk;
|
|
|
|
n->rts_child = choose(r, page, itup, rtstate);
|
|
|
|
stack = n;
|
|
|
|
|
|
|
|
iid = PageGetItemId(page, n->rts_child);
|
|
|
|
which = (IndexTuple) PageGetItem(page, iid);
|
|
|
|
blk = ItemPointerGetBlockNumber(&(which->t_tid));
|
|
|
|
}
|
|
|
|
} while (!(opaque->flags & F_LEAF));
|
|
|
|
|
|
|
|
if (nospace(page, itup))
|
|
|
|
{
|
|
|
|
/* need to do a split */
|
2001-03-07 21:20:26 +00:00
|
|
|
res = rtdosplit(r, buffer, stack, itup, rtstate);
|
1997-09-07 05:04:48 +00:00
|
|
|
freestack(stack);
|
|
|
|
WriteBuffer(buffer); /* don't forget to release buffer! */
|
1998-09-01 03:29:17 +00:00
|
|
|
return res;
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* add the item and write the buffer */
|
|
|
|
if (PageIsEmpty(page))
|
|
|
|
{
|
|
|
|
l = PageAddItem(page, (Item) itup, IndexTupleSize(itup),
|
|
|
|
FirstOffsetNumber,
|
|
|
|
LP_USED);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
else
|
|
|
|
{
|
|
|
|
l = PageAddItem(page, (Item) itup, IndexTupleSize(itup),
|
|
|
|
OffsetNumberNext(PageGetMaxOffsetNumber(page)),
|
|
|
|
LP_USED);
|
|
|
|
}
|
2001-03-07 21:20:26 +00:00
|
|
|
if (l == InvalidOffsetNumber)
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "failed to add index item to \"%s\"",
|
2001-03-07 21:20:26 +00:00
|
|
|
RelationGetRelationName(r));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
WriteBuffer(buffer);
|
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
datum = IndexTupleGetDatum(itup);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/* now expand the page boundary in the parent to include the new child */
|
2001-03-07 21:20:26 +00:00
|
|
|
rttighten(r, stack, datum, IndexTupleAttSize(itup), rtstate);
|
1996-07-09 06:22:35 +00:00
|
|
|
freestack(stack);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/* build and return an InsertIndexResult for this insertion */
|
|
|
|
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
|
|
|
|
ItemPointerSet(&(res->pointerData), blk, l);
|
|
|
|
|
1998-09-01 03:29:17 +00:00
|
|
|
return res;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rttighten(Relation r,
|
1997-09-08 21:56:23 +00:00
|
|
|
RTSTACK *stk,
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum datum,
|
1997-09-07 05:04:48 +00:00
|
|
|
int att_size,
|
1997-09-08 21:56:23 +00:00
|
|
|
RTSTATE *rtstate)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum oldud;
|
|
|
|
Datum tdatum;
|
1997-09-08 02:41:22 +00:00
|
|
|
Page p;
|
|
|
|
float old_size,
|
|
|
|
newd_size;
|
|
|
|
Buffer b;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
if (stk == (RTSTACK *) NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
b = ReadBuffer(r, stk->rts_blk);
|
|
|
|
p = BufferGetPage(b);
|
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
oldud = IndexTupleGetDatum(PageGetItem(p,
|
2001-03-22 04:01:46 +00:00
|
|
|
PageGetItemId(p, stk->rts_child)));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
FunctionCall2(&rtstate->sizeFn, oldud,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&old_size));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
datum = FunctionCall2(&rtstate->unionFn, oldud, datum);
|
2000-05-30 04:25:00 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
FunctionCall2(&rtstate->sizeFn, datum,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&newd_size));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-09-29 03:46:12 +00:00
|
|
|
/*
|
2001-10-25 05:50:21 +00:00
|
|
|
* If newd_size == 0 we have degenerate rectangles, so we don't know
|
|
|
|
* if there was any change, so we have to assume there was.
|
2001-09-29 03:46:12 +00:00
|
|
|
*/
|
|
|
|
if ((newd_size == 0) || (newd_size != old_size))
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
1998-09-01 03:29:17 +00:00
|
|
|
TupleDesc td = RelationGetDescr(r);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
if (td->attrs[0]->attlen < 0)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* This is an internal page, so 'oldud' had better be a union
|
|
|
|
* (constant-length) key, too. (See comment below.)
|
|
|
|
*/
|
2001-03-07 21:20:26 +00:00
|
|
|
Assert(VARSIZE(DatumGetPointer(datum)) ==
|
|
|
|
VARSIZE(DatumGetPointer(oldud)));
|
|
|
|
memmove(DatumGetPointer(oldud), DatumGetPointer(datum),
|
|
|
|
VARSIZE(DatumGetPointer(datum)));
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
else
|
2001-03-07 21:20:26 +00:00
|
|
|
{
|
|
|
|
memmove(DatumGetPointer(oldud), DatumGetPointer(datum),
|
|
|
|
att_size);
|
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
WriteBuffer(b);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The user may be defining an index on variable-sized data (like
|
|
|
|
* polygons). If so, we need to get a constant-sized datum for
|
|
|
|
* insertion on the internal page. We do this by calling the
|
2001-03-07 21:20:26 +00:00
|
|
|
* union proc, which is required to return a rectangle.
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
2001-03-07 21:20:26 +00:00
|
|
|
tdatum = FunctionCall2(&rtstate->unionFn, datum, datum);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
rttighten(r, stk->rts_parent, tdatum, att_size, rtstate);
|
2001-03-07 21:20:26 +00:00
|
|
|
pfree(DatumGetPointer(tdatum));
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
else
|
|
|
|
ReleaseBuffer(b);
|
2001-03-07 21:20:26 +00:00
|
|
|
pfree(DatumGetPointer(datum));
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-03-07 21:20:26 +00:00
|
|
|
* rtdosplit -- split a page in the tree.
|
1996-07-09 06:22:35 +00:00
|
|
|
*
|
2001-03-07 21:20:26 +00:00
|
|
|
* rtpicksplit does the interesting work of choosing the split.
|
|
|
|
* This routine just does the bit-pushing.
|
1996-07-09 06:22:35 +00:00
|
|
|
*/
|
1997-09-08 02:41:22 +00:00
|
|
|
static InsertIndexResult
|
2001-03-07 21:20:26 +00:00
|
|
|
rtdosplit(Relation r,
|
|
|
|
Buffer buffer,
|
|
|
|
RTSTACK *stack,
|
|
|
|
IndexTuple itup,
|
|
|
|
RTSTATE *rtstate)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
Page p;
|
|
|
|
Buffer leftbuf,
|
|
|
|
rightbuf;
|
|
|
|
Page left,
|
|
|
|
right;
|
|
|
|
ItemId itemid;
|
|
|
|
IndexTuple item;
|
|
|
|
IndexTuple ltup,
|
|
|
|
rtup;
|
|
|
|
OffsetNumber maxoff;
|
|
|
|
OffsetNumber i;
|
|
|
|
OffsetNumber leftoff,
|
|
|
|
rightoff;
|
|
|
|
BlockNumber lbknum,
|
|
|
|
rbknum;
|
|
|
|
BlockNumber bufblock;
|
1997-09-07 05:04:48 +00:00
|
|
|
RTreePageOpaque opaque;
|
1997-09-08 02:41:22 +00:00
|
|
|
int blank;
|
1997-09-07 05:04:48 +00:00
|
|
|
InsertIndexResult res;
|
1997-09-08 02:41:22 +00:00
|
|
|
char *isnull;
|
|
|
|
SPLITVEC v;
|
2001-03-07 21:20:26 +00:00
|
|
|
OffsetNumber *spl_left,
|
|
|
|
*spl_right;
|
1997-09-08 02:41:22 +00:00
|
|
|
TupleDesc tupDesc;
|
2001-09-29 03:46:12 +00:00
|
|
|
int n;
|
|
|
|
OffsetNumber newitemoff;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
p = (Page) BufferGetPage(buffer);
|
|
|
|
opaque = (RTreePageOpaque) PageGetSpecialPointer(p);
|
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
rtpicksplit(r, p, &v, itup, rtstate);
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
/*
|
|
|
|
* The root of the tree is the first block in the relation. If we're
|
|
|
|
* about to split the root, we need to do some hocus-pocus to enforce
|
|
|
|
* this guarantee.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (BufferGetBlockNumber(buffer) == P_ROOT)
|
|
|
|
{
|
|
|
|
leftbuf = ReadBuffer(r, P_NEW);
|
|
|
|
RTInitBuffer(leftbuf, opaque->flags);
|
|
|
|
lbknum = BufferGetBlockNumber(leftbuf);
|
|
|
|
left = (Page) BufferGetPage(leftbuf);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
else
|
|
|
|
{
|
|
|
|
leftbuf = buffer;
|
|
|
|
IncrBufferRefCount(buffer);
|
|
|
|
lbknum = BufferGetBlockNumber(buffer);
|
|
|
|
left = (Page) PageGetTempPage(p, sizeof(RTreePageOpaqueData));
|
|
|
|
}
|
|
|
|
|
|
|
|
rightbuf = ReadBuffer(r, P_NEW);
|
|
|
|
RTInitBuffer(rightbuf, opaque->flags);
|
|
|
|
rbknum = BufferGetBlockNumber(rightbuf);
|
|
|
|
right = (Page) BufferGetPage(rightbuf);
|
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
spl_left = v.spl_left;
|
|
|
|
spl_right = v.spl_right;
|
1997-09-07 05:04:48 +00:00
|
|
|
leftoff = rightoff = FirstOffsetNumber;
|
|
|
|
maxoff = PageGetMaxOffsetNumber(p);
|
2001-09-29 03:46:12 +00:00
|
|
|
newitemoff = OffsetNumberNext(maxoff);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/* build an InsertIndexResult for this insertion */
|
|
|
|
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
|
|
|
|
|
2001-09-29 03:46:12 +00:00
|
|
|
/*
|
2001-10-25 05:50:21 +00:00
|
|
|
* spl_left contains a list of the offset numbers of the tuples that
|
|
|
|
* will go to the left page. For each offset number, get the tuple
|
|
|
|
* item, then add the item to the left page. Similarly for the right
|
|
|
|
* side.
|
2001-09-29 03:46:12 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* fill left node */
|
|
|
|
for (n = 0; n < v.spl_nleft; n++)
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2001-09-29 03:46:12 +00:00
|
|
|
i = *spl_left;
|
|
|
|
if (i == newitemoff)
|
|
|
|
item = itup;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
itemid = PageGetItemId(p, i);
|
|
|
|
item = (IndexTuple) PageGetItem(p, itemid);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PageAddItem(left, (Item) item, IndexTupleSize(item),
|
2001-03-07 21:20:26 +00:00
|
|
|
leftoff, LP_USED) == InvalidOffsetNumber)
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "failed to add index item to \"%s\"",
|
2001-03-07 21:20:26 +00:00
|
|
|
RelationGetRelationName(r));
|
1997-09-07 05:04:48 +00:00
|
|
|
leftoff = OffsetNumberNext(leftoff);
|
2001-09-29 03:46:12 +00:00
|
|
|
|
|
|
|
if (i == newitemoff)
|
|
|
|
ItemPointerSet(&(res->pointerData), lbknum, leftoff);
|
|
|
|
|
2001-10-25 05:50:21 +00:00
|
|
|
spl_left++; /* advance in left split vector */
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
2001-09-29 03:46:12 +00:00
|
|
|
|
|
|
|
/* fill right node */
|
|
|
|
for (n = 0; n < v.spl_nright; n++)
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2001-09-29 03:46:12 +00:00
|
|
|
i = *spl_right;
|
|
|
|
if (i == newitemoff)
|
|
|
|
item = itup;
|
|
|
|
else
|
|
|
|
{
|
|
|
|
itemid = PageGetItemId(p, i);
|
|
|
|
item = (IndexTuple) PageGetItem(p, itemid);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (PageAddItem(right, (Item) item, IndexTupleSize(item),
|
2001-03-07 21:20:26 +00:00
|
|
|
rightoff, LP_USED) == InvalidOffsetNumber)
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "failed to add index item to \"%s\"",
|
2001-03-07 21:20:26 +00:00
|
|
|
RelationGetRelationName(r));
|
1997-09-07 05:04:48 +00:00
|
|
|
rightoff = OffsetNumberNext(rightoff);
|
2001-09-29 03:46:12 +00:00
|
|
|
|
|
|
|
if (i == newitemoff)
|
|
|
|
ItemPointerSet(&(res->pointerData), rbknum, rightoff);
|
|
|
|
|
2001-10-25 05:50:21 +00:00
|
|
|
spl_right++; /* advance in right split vector */
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
/* Make sure we consumed all of the split vectors, and release 'em */
|
|
|
|
Assert(*spl_left == InvalidOffsetNumber);
|
|
|
|
Assert(*spl_right == InvalidOffsetNumber);
|
|
|
|
pfree(v.spl_left);
|
|
|
|
pfree(v.spl_right);
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
if ((bufblock = BufferGetBlockNumber(buffer)) != P_ROOT)
|
|
|
|
PageRestoreTempPage(left, p);
|
|
|
|
WriteBuffer(leftbuf);
|
|
|
|
WriteBuffer(rightbuf);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Okay, the page is split. We have three things left to do:
|
|
|
|
*
|
|
|
|
* 1) Adjust any active scans on this index to cope with changes we
|
|
|
|
* introduced in its structure by splitting this page.
|
|
|
|
*
|
|
|
|
* 2) "Tighten" the bounding box of the pointer to the left page in the
|
|
|
|
* parent node in the tree, if any. Since we moved a bunch of stuff
|
|
|
|
* off the left page, we expect it to get smaller. This happens in
|
|
|
|
* the internal insertion routine.
|
|
|
|
*
|
|
|
|
* 3) Insert a pointer to the right page in the parent. This may cause
|
|
|
|
* the parent to split. If it does, we need to repeat steps one and
|
|
|
|
* two for each split node in the tree.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* adjust active scans */
|
|
|
|
rtadjscans(r, RTOP_SPLIT, bufblock, FirstOffsetNumber);
|
|
|
|
|
|
|
|
tupDesc = r->rd_att;
|
2001-03-07 21:20:26 +00:00
|
|
|
isnull = (char *) palloc(r->rd_rel->relnatts);
|
|
|
|
for (blank = 0; blank < r->rd_rel->relnatts; blank++)
|
|
|
|
isnull[blank] = ' ';
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
ltup = (IndexTuple) index_formtuple(tupDesc,
|
2001-03-07 21:20:26 +00:00
|
|
|
&(v.spl_ldatum), isnull);
|
1997-09-07 05:04:48 +00:00
|
|
|
rtup = (IndexTuple) index_formtuple(tupDesc,
|
2001-03-07 21:20:26 +00:00
|
|
|
&(v.spl_rdatum), isnull);
|
1997-09-07 05:04:48 +00:00
|
|
|
pfree(isnull);
|
|
|
|
|
|
|
|
/* set pointers to new child pages in the internal index tuples */
|
|
|
|
ItemPointerSet(&(ltup->t_tid), lbknum, 1);
|
|
|
|
ItemPointerSet(&(rtup->t_tid), rbknum, 1);
|
|
|
|
|
|
|
|
rtintinsert(r, stack, ltup, rtup, rtstate);
|
|
|
|
|
|
|
|
pfree(ltup);
|
|
|
|
pfree(rtup);
|
|
|
|
|
1998-09-01 03:29:17 +00:00
|
|
|
return res;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rtintinsert(Relation r,
|
1997-09-08 21:56:23 +00:00
|
|
|
RTSTACK *stk,
|
1997-09-07 05:04:48 +00:00
|
|
|
IndexTuple ltup,
|
|
|
|
IndexTuple rtup,
|
1997-09-08 21:56:23 +00:00
|
|
|
RTSTATE *rtstate)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
IndexTuple old;
|
|
|
|
Buffer b;
|
|
|
|
Page p;
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum ldatum,
|
|
|
|
rdatum,
|
|
|
|
newdatum;
|
1997-09-07 05:04:48 +00:00
|
|
|
InsertIndexResult res;
|
|
|
|
|
|
|
|
if (stk == (RTSTACK *) NULL)
|
|
|
|
{
|
|
|
|
rtnewroot(r, ltup, rtup);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
b = ReadBuffer(r, stk->rts_blk);
|
|
|
|
p = BufferGetPage(b);
|
|
|
|
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
|
|
|
|
|
|
|
|
/*
|
2001-03-22 04:01:46 +00:00
|
|
|
* This is a hack. Right now, we force rtree internal keys to be
|
|
|
|
* constant size. To fix this, need delete the old key and add both
|
|
|
|
* left and right for the two new pages. The insertion of left may
|
|
|
|
* force a split if the new left key is bigger than the old key.
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
if (IndexTupleSize(old) != IndexTupleSize(ltup))
|
2003-07-21 20:29:40 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("variable-length rtree keys are not supported")));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/* install pointer to left child */
|
|
|
|
memmove(old, ltup, IndexTupleSize(ltup));
|
|
|
|
|
|
|
|
if (nospace(p, rtup))
|
|
|
|
{
|
2001-03-07 21:20:26 +00:00
|
|
|
newdatum = IndexTupleGetDatum(ltup);
|
1997-09-07 05:04:48 +00:00
|
|
|
rttighten(r, stk->rts_parent, newdatum,
|
2001-03-07 21:20:26 +00:00
|
|
|
IndexTupleAttSize(ltup), rtstate);
|
|
|
|
res = rtdosplit(r, b, stk->rts_parent, rtup, rtstate);
|
1997-09-07 05:04:48 +00:00
|
|
|
WriteBuffer(b); /* don't forget to release buffer! -
|
|
|
|
* 01/31/94 */
|
|
|
|
pfree(res);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2001-03-07 21:20:26 +00:00
|
|
|
if (PageAddItem(p, (Item) rtup, IndexTupleSize(rtup),
|
|
|
|
PageGetMaxOffsetNumber(p),
|
|
|
|
LP_USED) == InvalidOffsetNumber)
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "failed to add index item to \"%s\"",
|
2001-03-07 21:20:26 +00:00
|
|
|
RelationGetRelationName(r));
|
1997-09-07 05:04:48 +00:00
|
|
|
WriteBuffer(b);
|
2001-03-07 21:20:26 +00:00
|
|
|
ldatum = IndexTupleGetDatum(ltup);
|
|
|
|
rdatum = IndexTupleGetDatum(rtup);
|
|
|
|
newdatum = FunctionCall2(&rtstate->unionFn, ldatum, rdatum);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
rttighten(r, stk->rts_parent, newdatum,
|
2001-03-07 21:20:26 +00:00
|
|
|
IndexTupleAttSize(rtup), rtstate);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
pfree(DatumGetPointer(newdatum));
|
1997-09-07 05:04:48 +00:00
|
|
|
}
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
|
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
Buffer b;
|
|
|
|
Page p;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
b = ReadBuffer(r, P_ROOT);
|
|
|
|
RTInitBuffer(b, 0);
|
|
|
|
p = BufferGetPage(b);
|
2001-03-07 21:20:26 +00:00
|
|
|
if (PageAddItem(p, (Item) lt, IndexTupleSize(lt),
|
|
|
|
FirstOffsetNumber,
|
|
|
|
LP_USED) == InvalidOffsetNumber)
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "failed to add index item to \"%s\"",
|
2001-03-07 21:20:26 +00:00
|
|
|
RelationGetRelationName(r));
|
|
|
|
if (PageAddItem(p, (Item) rt, IndexTupleSize(rt),
|
|
|
|
OffsetNumberNext(FirstOffsetNumber),
|
|
|
|
LP_USED) == InvalidOffsetNumber)
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "failed to add index item to \"%s\"",
|
2001-03-07 21:20:26 +00:00
|
|
|
RelationGetRelationName(r));
|
1997-09-07 05:04:48 +00:00
|
|
|
WriteBuffer(b);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
/*
|
|
|
|
* Choose how to split an rtree page into two pages.
|
|
|
|
*
|
|
|
|
* We return two vectors of index item numbers, one for the items to be
|
|
|
|
* put on the left page, one for the items to be put on the right page.
|
|
|
|
* In addition, the item to be added (itup) is listed in the appropriate
|
2001-03-22 04:01:46 +00:00
|
|
|
* vector. It is represented by item number N+1 (N = # of items on page).
|
2001-03-07 21:20:26 +00:00
|
|
|
*
|
2001-09-29 03:46:12 +00:00
|
|
|
* Both vectors have a terminating sentinel value of InvalidOffsetNumber,
|
|
|
|
* but the sentinal value is no longer used, because the SPLITVEC
|
|
|
|
* vector also contains the length of each vector, and that information
|
|
|
|
* is now used to iterate over them in rtdosplit(). --kbb, 21 Sept 2001
|
2001-03-07 21:20:26 +00:00
|
|
|
*
|
|
|
|
* The bounding-box datums for the two new pages are also returned in *v.
|
|
|
|
*
|
|
|
|
* This is the quadratic-cost split algorithm Guttman describes in
|
|
|
|
* his paper. The reason we chose it is that you can implement this
|
|
|
|
* with less information about the data types on which you're operating.
|
|
|
|
*
|
|
|
|
* We must also deal with a consideration not found in Guttman's algorithm:
|
|
|
|
* variable-length data. In particular, the incoming item might be
|
2001-03-22 04:01:46 +00:00
|
|
|
* large enough that not just any split will work. In the worst case,
|
2001-03-07 21:20:26 +00:00
|
|
|
* our "split" may have to be the new item on one page and all the existing
|
2001-03-22 04:01:46 +00:00
|
|
|
* items on the other. Short of that, we have to take care that we do not
|
2001-03-07 21:20:26 +00:00
|
|
|
* make a split that leaves both pages too full for the new item.
|
|
|
|
*/
|
1996-07-09 06:22:35 +00:00
|
|
|
static void
|
2001-03-07 21:20:26 +00:00
|
|
|
rtpicksplit(Relation r,
|
|
|
|
Page page,
|
|
|
|
SPLITVEC *v,
|
|
|
|
IndexTuple itup,
|
|
|
|
RTSTATE *rtstate)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2001-03-07 21:20:26 +00:00
|
|
|
OffsetNumber maxoff,
|
|
|
|
newitemoff;
|
1997-09-08 02:41:22 +00:00
|
|
|
OffsetNumber i,
|
|
|
|
j;
|
|
|
|
IndexTuple item_1,
|
|
|
|
item_2;
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum datum_alpha,
|
|
|
|
datum_beta;
|
|
|
|
Datum datum_l,
|
|
|
|
datum_r;
|
|
|
|
Datum union_d,
|
|
|
|
union_dl,
|
|
|
|
union_dr;
|
|
|
|
Datum inter_d;
|
1997-09-08 02:41:22 +00:00
|
|
|
bool firsttime;
|
|
|
|
float size_alpha,
|
|
|
|
size_beta,
|
|
|
|
size_union,
|
|
|
|
size_inter;
|
|
|
|
float size_waste,
|
|
|
|
waste;
|
|
|
|
float size_l,
|
|
|
|
size_r;
|
|
|
|
int nbytes;
|
|
|
|
OffsetNumber seed_1 = 0,
|
|
|
|
seed_2 = 0;
|
|
|
|
OffsetNumber *left,
|
|
|
|
*right;
|
2001-03-07 21:20:26 +00:00
|
|
|
Size newitemsz,
|
|
|
|
item_1_sz,
|
|
|
|
item_2_sz,
|
|
|
|
left_avail_space,
|
|
|
|
right_avail_space;
|
2001-09-29 03:46:12 +00:00
|
|
|
int total_num_tuples,
|
|
|
|
num_tuples_without_seeds,
|
2001-10-28 06:26:15 +00:00
|
|
|
max_after_split; /* in Guttman's lingo, (M - m) */
|
2001-10-25 05:50:21 +00:00
|
|
|
float diff; /* diff between cost of putting tuple left
|
|
|
|
* or right */
|
|
|
|
SPLITCOST *cost_vector;
|
2001-09-29 03:46:12 +00:00
|
|
|
int n;
|
2001-03-07 21:20:26 +00:00
|
|
|
|
|
|
|
/*
|
2001-03-22 04:01:46 +00:00
|
|
|
* First, make sure the new item is not so large that we can't
|
|
|
|
* possibly fit it on a page, even by itself. (It's sufficient to
|
|
|
|
* make this test here, since any oversize tuple must lead to a page
|
|
|
|
* split attempt.)
|
2001-03-07 21:20:26 +00:00
|
|
|
*/
|
|
|
|
newitemsz = IndexTupleTotalSize(itup);
|
|
|
|
if (newitemsz > RTPageAvailSpace)
|
2003-07-21 20:29:40 +00:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
|
|
|
|
errmsg("index tuple size %lu exceeds rtree maximum, %lu",
|
|
|
|
(unsigned long) newitemsz,
|
|
|
|
(unsigned long) RTPageAvailSpace)));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
maxoff = PageGetMaxOffsetNumber(page);
|
2001-03-22 04:01:46 +00:00
|
|
|
newitemoff = OffsetNumberNext(maxoff); /* phony index for new
|
|
|
|
* item */
|
2001-09-29 03:46:12 +00:00
|
|
|
total_num_tuples = newitemoff;
|
|
|
|
num_tuples_without_seeds = total_num_tuples - 2;
|
|
|
|
max_after_split = total_num_tuples / 2; /* works for m = M/2 */
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
/* Make arrays big enough for worst case, including sentinel */
|
1997-09-07 05:04:48 +00:00
|
|
|
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
|
|
|
|
v->spl_left = (OffsetNumber *) palloc(nbytes);
|
|
|
|
v->spl_right = (OffsetNumber *) palloc(nbytes);
|
|
|
|
|
|
|
|
firsttime = true;
|
|
|
|
waste = 0.0;
|
|
|
|
|
|
|
|
for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i))
|
|
|
|
{
|
|
|
|
item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
|
2001-03-07 21:20:26 +00:00
|
|
|
datum_alpha = IndexTupleGetDatum(item_1);
|
|
|
|
item_1_sz = IndexTupleTotalSize(item_1);
|
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j))
|
|
|
|
{
|
|
|
|
item_2 = (IndexTuple) PageGetItem(page, PageGetItemId(page, j));
|
2001-03-07 21:20:26 +00:00
|
|
|
datum_beta = IndexTupleGetDatum(item_2);
|
|
|
|
item_2_sz = IndexTupleTotalSize(item_2);
|
|
|
|
|
|
|
|
/*
|
2001-03-22 04:01:46 +00:00
|
|
|
* Ignore seed pairs that don't leave room for the new item on
|
|
|
|
* either split page.
|
2001-03-07 21:20:26 +00:00
|
|
|
*/
|
|
|
|
if (newitemsz + item_1_sz > RTPageAvailSpace &&
|
|
|
|
newitemsz + item_2_sz > RTPageAvailSpace)
|
|
|
|
continue;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/* compute the wasted space by unioning these guys */
|
2001-03-07 21:20:26 +00:00
|
|
|
union_d = FunctionCall2(&rtstate->unionFn,
|
|
|
|
datum_alpha, datum_beta);
|
|
|
|
FunctionCall2(&rtstate->sizeFn, union_d,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&size_union));
|
2001-03-07 21:20:26 +00:00
|
|
|
inter_d = FunctionCall2(&rtstate->interFn,
|
|
|
|
datum_alpha, datum_beta);
|
2001-03-22 04:01:46 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The interFn may return a NULL pointer (not an SQL null!) to
|
|
|
|
* indicate no intersection. sizeFn must cope with this.
|
2000-07-30 20:44:02 +00:00
|
|
|
*/
|
2001-03-07 21:20:26 +00:00
|
|
|
FunctionCall2(&rtstate->sizeFn, inter_d,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&size_inter));
|
1997-09-07 05:04:48 +00:00
|
|
|
size_waste = size_union - size_inter;
|
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
if (DatumGetPointer(union_d) != NULL)
|
|
|
|
pfree(DatumGetPointer(union_d));
|
|
|
|
if (DatumGetPointer(inter_d) != NULL)
|
|
|
|
pfree(DatumGetPointer(inter_d));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* are these a more promising split that what we've already
|
|
|
|
* seen?
|
|
|
|
*/
|
|
|
|
if (size_waste > waste || firsttime)
|
|
|
|
{
|
|
|
|
waste = size_waste;
|
|
|
|
seed_1 = i;
|
|
|
|
seed_2 = j;
|
|
|
|
firsttime = false;
|
|
|
|
}
|
|
|
|
}
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
if (firsttime)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* There is no possible split except to put the new item on its
|
|
|
|
* own page. Since we still have to compute the union rectangles,
|
|
|
|
* we play dumb and run through the split algorithm anyway,
|
|
|
|
* setting seed_1 = first item on page and seed_2 = new item.
|
|
|
|
*/
|
|
|
|
seed_1 = FirstOffsetNumber;
|
|
|
|
seed_2 = newitemoff;
|
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, seed_1));
|
2001-03-07 21:20:26 +00:00
|
|
|
datum_alpha = IndexTupleGetDatum(item_1);
|
|
|
|
datum_l = FunctionCall2(&rtstate->unionFn, datum_alpha, datum_alpha);
|
|
|
|
FunctionCall2(&rtstate->sizeFn, datum_l, PointerGetDatum(&size_l));
|
|
|
|
left_avail_space = RTPageAvailSpace - IndexTupleTotalSize(item_1);
|
|
|
|
|
|
|
|
if (seed_2 == newitemoff)
|
|
|
|
{
|
|
|
|
item_2 = itup;
|
|
|
|
/* Needn't leave room for new item in calculations below */
|
|
|
|
newitemsz = 0;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
item_2 = (IndexTuple) PageGetItem(page, PageGetItemId(page, seed_2));
|
|
|
|
datum_beta = IndexTupleGetDatum(item_2);
|
|
|
|
datum_r = FunctionCall2(&rtstate->unionFn, datum_beta, datum_beta);
|
|
|
|
FunctionCall2(&rtstate->sizeFn, datum_r, PointerGetDatum(&size_r));
|
|
|
|
right_avail_space = RTPageAvailSpace - IndexTupleTotalSize(item_2);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
/*
|
2001-09-29 03:46:12 +00:00
|
|
|
* Now split up the regions between the two seeds.
|
|
|
|
*
|
2001-10-25 05:50:21 +00:00
|
|
|
* The cost_vector array will contain hints for determining where each
|
|
|
|
* tuple should go. Each record in the array will contain a boolean,
|
|
|
|
* choose_left, that indicates which node the tuple prefers to be on,
|
|
|
|
* and the absolute difference in cost between putting the tuple in
|
|
|
|
* its favored node and in the other node.
|
2001-09-29 03:46:12 +00:00
|
|
|
*
|
|
|
|
* Later, we will sort the cost_vector in descending order by cost
|
2001-10-25 05:50:21 +00:00
|
|
|
* difference, and consider the tuples in that order for placement.
|
|
|
|
* That way, the tuples that *really* want to be in one node or the
|
|
|
|
* other get to choose first, and the tuples that don't really care
|
|
|
|
* choose last.
|
2001-09-29 03:46:12 +00:00
|
|
|
*
|
2001-10-25 05:50:21 +00:00
|
|
|
* First, build the cost_vector array. The new index tuple will also be
|
|
|
|
* handled in this loop, and represented in the array, with
|
|
|
|
* i==newitemoff.
|
2001-09-29 03:46:12 +00:00
|
|
|
*
|
2001-10-25 05:50:21 +00:00
|
|
|
* In the case of variable size tuples it is possible that we only have
|
|
|
|
* the two seeds and no other tuples, in which case we don't do any of
|
|
|
|
* this cost_vector stuff.
|
2001-09-29 03:46:12 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* to keep compiler quiet */
|
|
|
|
cost_vector = (SPLITCOST *) NULL;
|
|
|
|
|
|
|
|
if (num_tuples_without_seeds > 0)
|
|
|
|
{
|
|
|
|
cost_vector =
|
|
|
|
(SPLITCOST *) palloc(num_tuples_without_seeds * sizeof(SPLITCOST));
|
|
|
|
n = 0;
|
|
|
|
for (i = FirstOffsetNumber; i <= newitemoff; i = OffsetNumberNext(i))
|
|
|
|
{
|
|
|
|
/* Compute new union datums and sizes for both choices */
|
|
|
|
|
|
|
|
if ((i == seed_1) || (i == seed_2))
|
|
|
|
continue;
|
|
|
|
else if (i == newitemoff)
|
|
|
|
item_1 = itup;
|
|
|
|
else
|
|
|
|
item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
|
|
|
|
|
|
|
|
datum_alpha = IndexTupleGetDatum(item_1);
|
|
|
|
union_dl = FunctionCall2(&rtstate->unionFn, datum_l, datum_alpha);
|
|
|
|
union_dr = FunctionCall2(&rtstate->unionFn, datum_r, datum_alpha);
|
|
|
|
FunctionCall2(&rtstate->sizeFn, union_dl,
|
|
|
|
PointerGetDatum(&size_alpha));
|
|
|
|
FunctionCall2(&rtstate->sizeFn, union_dr,
|
|
|
|
PointerGetDatum(&size_beta));
|
2002-06-25 17:26:11 +00:00
|
|
|
pfree(DatumGetPointer(union_dl));
|
|
|
|
pfree(DatumGetPointer(union_dr));
|
2001-09-29 03:46:12 +00:00
|
|
|
|
|
|
|
diff = (size_alpha - size_l) - (size_beta - size_r);
|
|
|
|
|
|
|
|
cost_vector[n].offset_number = i;
|
|
|
|
cost_vector[n].cost_differential = fabs(diff);
|
|
|
|
cost_vector[n].choose_left = (diff < 0);
|
|
|
|
|
|
|
|
n++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-10-25 05:50:21 +00:00
|
|
|
* Sort the array. The function qsort_comp_splitcost is set up
|
|
|
|
* "backwards", to provided descending order.
|
2001-09-29 03:46:12 +00:00
|
|
|
*/
|
|
|
|
qsort(cost_vector, num_tuples_without_seeds, sizeof(SPLITCOST),
|
|
|
|
&qsort_comp_splitcost);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-10-25 05:50:21 +00:00
|
|
|
* Now make the final decisions about where each tuple will go, and
|
|
|
|
* build the vectors to return in the SPLITVEC record.
|
1997-09-07 05:04:48 +00:00
|
|
|
*
|
2001-10-25 05:50:21 +00:00
|
|
|
* The cost_vector array contains (descriptions of) all the tuples, in
|
|
|
|
* the order that we want to consider them, so we we just iterate
|
|
|
|
* through it and place each tuple in left or right nodes, according
|
|
|
|
* to the criteria described below.
|
1996-07-09 06:22:35 +00:00
|
|
|
*/
|
2001-09-29 03:46:12 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
left = v->spl_left;
|
|
|
|
v->spl_nleft = 0;
|
|
|
|
right = v->spl_right;
|
|
|
|
v->spl_nright = 0;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-10-25 05:50:21 +00:00
|
|
|
/*
|
|
|
|
* Place the seeds first. left avail space, left union, right avail
|
|
|
|
* space, and right union have already been adjusted for the seeds.
|
2001-09-29 03:46:12 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
*left++ = seed_1;
|
|
|
|
v->spl_nleft++;
|
|
|
|
|
|
|
|
*right++ = seed_2;
|
|
|
|
v->spl_nright++;
|
|
|
|
|
|
|
|
for (n = 0; n < num_tuples_without_seeds; n++)
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2001-03-22 04:01:46 +00:00
|
|
|
bool left_feasible,
|
|
|
|
right_feasible,
|
|
|
|
choose_left;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
/*
|
2001-10-25 05:50:21 +00:00
|
|
|
* We need to figure out which page needs the least enlargement in
|
|
|
|
* order to store the item.
|
1997-09-07 05:04:48 +00:00
|
|
|
*/
|
|
|
|
|
2001-09-29 03:46:12 +00:00
|
|
|
i = cost_vector[n].offset_number;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
/* Compute new union datums and sizes for both possible additions */
|
|
|
|
if (i == newitemoff)
|
|
|
|
{
|
1997-09-07 05:04:48 +00:00
|
|
|
item_1 = itup;
|
2001-03-07 21:20:26 +00:00
|
|
|
/* Needn't leave room for new item anymore */
|
|
|
|
newitemsz = 0;
|
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
else
|
|
|
|
item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
|
2001-03-07 21:20:26 +00:00
|
|
|
item_1_sz = IndexTupleTotalSize(item_1);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
datum_alpha = IndexTupleGetDatum(item_1);
|
|
|
|
union_dl = FunctionCall2(&rtstate->unionFn, datum_l, datum_alpha);
|
|
|
|
union_dr = FunctionCall2(&rtstate->unionFn, datum_r, datum_alpha);
|
|
|
|
FunctionCall2(&rtstate->sizeFn, union_dl,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&size_alpha));
|
2001-03-07 21:20:26 +00:00
|
|
|
FunctionCall2(&rtstate->sizeFn, union_dr,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&size_beta));
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
/*
|
2001-03-22 04:01:46 +00:00
|
|
|
* We prefer the page that shows smaller enlargement of its union
|
|
|
|
* area (Guttman's algorithm), but we must take care that at least
|
|
|
|
* one page will still have room for the new item after this one
|
|
|
|
* is added.
|
2001-03-07 21:20:26 +00:00
|
|
|
*
|
2001-03-22 04:01:46 +00:00
|
|
|
* (We know that all the old items together can fit on one page, so
|
|
|
|
* we need not worry about any other problem than failing to fit
|
2001-03-07 21:20:26 +00:00
|
|
|
* the new item.)
|
2001-09-29 03:46:12 +00:00
|
|
|
*
|
|
|
|
* Guttman's algorithm actually has two factors to consider (in
|
2001-10-25 05:50:21 +00:00
|
|
|
* order): 1. if one node has so many tuples already assigned to
|
2001-09-29 03:46:12 +00:00
|
|
|
* it that the other needs all the rest in order to satisfy the
|
2001-10-25 05:50:21 +00:00
|
|
|
* condition that neither node has fewer than m tuples, then that
|
|
|
|
* is decisive; 2. otherwise, choose the page that shows the
|
|
|
|
* smaller enlargement of its union area.
|
2001-09-29 03:46:12 +00:00
|
|
|
*
|
2001-10-25 05:50:21 +00:00
|
|
|
* I have chosen m = M/2, where M is the maximum number of tuples on
|
|
|
|
* a page. (Actually, this is only strictly true for fixed size
|
|
|
|
* tuples. For variable size tuples, there still might have to be
|
|
|
|
* only one tuple on a page, if it is really big. But even with
|
|
|
|
* variable size tuples we still try to get m as close as possible
|
|
|
|
* to M/2.)
|
2001-09-29 03:46:12 +00:00
|
|
|
*
|
2001-10-25 05:50:21 +00:00
|
|
|
* The question of which page shows the smaller enlargement of its
|
|
|
|
* union area has already been answered, and the answer stored in
|
|
|
|
* the choose_left field of the SPLITCOST record.
|
2001-03-07 21:20:26 +00:00
|
|
|
*/
|
|
|
|
left_feasible = (left_avail_space >= item_1_sz &&
|
|
|
|
((left_avail_space - item_1_sz) >= newitemsz ||
|
|
|
|
right_avail_space >= newitemsz));
|
|
|
|
right_feasible = (right_avail_space >= item_1_sz &&
|
|
|
|
((right_avail_space - item_1_sz) >= newitemsz ||
|
|
|
|
left_avail_space >= newitemsz));
|
|
|
|
if (left_feasible && right_feasible)
|
|
|
|
{
|
2001-09-29 03:46:12 +00:00
|
|
|
/*
|
2001-10-25 05:50:21 +00:00
|
|
|
* Both feasible, use Guttman's algorithm. First check the m
|
|
|
|
* condition described above, and if that doesn't apply,
|
|
|
|
* choose the page with the smaller enlargement of its union
|
|
|
|
* area.
|
2001-09-29 03:46:12 +00:00
|
|
|
*/
|
|
|
|
if (v->spl_nleft > max_after_split)
|
|
|
|
choose_left = false;
|
|
|
|
else if (v->spl_nright > max_after_split)
|
|
|
|
choose_left = true;
|
|
|
|
else
|
|
|
|
choose_left = cost_vector[n].choose_left;
|
2001-03-07 21:20:26 +00:00
|
|
|
}
|
|
|
|
else if (left_feasible)
|
|
|
|
choose_left = true;
|
|
|
|
else if (right_feasible)
|
|
|
|
choose_left = false;
|
|
|
|
else
|
1997-09-07 05:04:48 +00:00
|
|
|
{
|
2003-07-21 20:29:40 +00:00
|
|
|
elog(ERROR, "failed to find a workable rtree page split");
|
2001-10-28 06:26:15 +00:00
|
|
|
choose_left = false; /* keep compiler quiet */
|
2001-03-07 21:20:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (choose_left)
|
|
|
|
{
|
|
|
|
pfree(DatumGetPointer(datum_l));
|
|
|
|
pfree(DatumGetPointer(union_dr));
|
1997-09-07 05:04:48 +00:00
|
|
|
datum_l = union_dl;
|
|
|
|
size_l = size_alpha;
|
2001-03-07 21:20:26 +00:00
|
|
|
left_avail_space -= item_1_sz;
|
1997-09-07 05:04:48 +00:00
|
|
|
*left++ = i;
|
|
|
|
v->spl_nleft++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2001-03-07 21:20:26 +00:00
|
|
|
pfree(DatumGetPointer(datum_r));
|
|
|
|
pfree(DatumGetPointer(union_dl));
|
1997-09-07 05:04:48 +00:00
|
|
|
datum_r = union_dr;
|
1999-11-15 09:59:00 +00:00
|
|
|
size_r = size_beta;
|
2001-03-07 21:20:26 +00:00
|
|
|
right_avail_space -= item_1_sz;
|
1997-09-07 05:04:48 +00:00
|
|
|
*right++ = i;
|
|
|
|
v->spl_nright++;
|
|
|
|
}
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
2001-03-07 21:20:26 +00:00
|
|
|
|
2001-09-29 03:46:12 +00:00
|
|
|
if (num_tuples_without_seeds > 0)
|
|
|
|
pfree(cost_vector);
|
|
|
|
|
2001-03-22 04:01:46 +00:00
|
|
|
*left = *right = InvalidOffsetNumber; /* add ending sentinels */
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
v->spl_ldatum = datum_l;
|
|
|
|
v->spl_rdatum = datum_r;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
RTInitBuffer(Buffer b, uint32 f)
|
|
|
|
{
|
1997-09-07 05:04:48 +00:00
|
|
|
RTreePageOpaque opaque;
|
1997-09-08 02:41:22 +00:00
|
|
|
Page page;
|
|
|
|
Size pageSize;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
pageSize = BufferGetPageSize(b);
|
|
|
|
|
|
|
|
page = BufferGetPage(b);
|
2002-01-15 22:14:17 +00:00
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
PageInit(page, pageSize, sizeof(RTreePageOpaqueData));
|
|
|
|
|
|
|
|
opaque = (RTreePageOpaque) PageGetSpecialPointer(page);
|
|
|
|
opaque->flags = f;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
1997-09-08 02:41:22 +00:00
|
|
|
static OffsetNumber
|
1997-09-08 21:56:23 +00:00
|
|
|
choose(Relation r, Page p, IndexTuple it, RTSTATE *rtstate)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
OffsetNumber maxoff;
|
|
|
|
OffsetNumber i;
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum ud,
|
|
|
|
id;
|
|
|
|
Datum datum;
|
1997-09-08 02:41:22 +00:00
|
|
|
float usize,
|
|
|
|
dsize;
|
|
|
|
OffsetNumber which;
|
|
|
|
float which_grow;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2001-03-07 21:20:26 +00:00
|
|
|
id = IndexTupleGetDatum(it);
|
1997-09-07 05:04:48 +00:00
|
|
|
maxoff = PageGetMaxOffsetNumber(p);
|
|
|
|
which_grow = -1.0;
|
|
|
|
which = -1;
|
|
|
|
|
|
|
|
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
|
|
|
|
{
|
2001-03-07 21:20:26 +00:00
|
|
|
datum = IndexTupleGetDatum(PageGetItem(p, PageGetItemId(p, i)));
|
|
|
|
FunctionCall2(&rtstate->sizeFn, datum,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&dsize));
|
2001-03-07 21:20:26 +00:00
|
|
|
ud = FunctionCall2(&rtstate->unionFn, datum, id);
|
|
|
|
FunctionCall2(&rtstate->sizeFn, ud,
|
2000-05-30 04:25:00 +00:00
|
|
|
PointerGetDatum(&usize));
|
2001-03-07 21:20:26 +00:00
|
|
|
pfree(DatumGetPointer(ud));
|
1997-09-07 05:04:48 +00:00
|
|
|
if (which_grow < 0 || usize - dsize < which_grow)
|
|
|
|
{
|
|
|
|
which = i;
|
|
|
|
which_grow = usize - dsize;
|
|
|
|
if (which_grow == 0)
|
|
|
|
break;
|
|
|
|
}
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
1997-09-07 05:04:48 +00:00
|
|
|
|
1998-09-01 03:29:17 +00:00
|
|
|
return which;
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nospace(Page p, IndexTuple it)
|
|
|
|
{
|
1998-09-01 03:29:17 +00:00
|
|
|
return PageGetFreeSpace(p) < IndexTupleSize(it);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
1997-09-08 21:56:23 +00:00
|
|
|
freestack(RTSTACK *s)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
RTSTACK *p;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
while (s != (RTSTACK *) NULL)
|
|
|
|
{
|
|
|
|
p = s->rts_parent;
|
|
|
|
pfree(s);
|
|
|
|
s = p;
|
|
|
|
}
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/*
|
|
|
|
* Bulk deletion of all index entries pointing to a set of heap tuples.
|
|
|
|
* The set of target tuples is specified via a callback routine that tells
|
|
|
|
* whether any given heap tuple (identified by ItemPointer) is being deleted.
|
|
|
|
*
|
|
|
|
* Result: a palloc'd struct containing statistical info for VACUUM displays.
|
|
|
|
*/
|
2000-06-13 07:35:40 +00:00
|
|
|
Datum
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
rtbulkdelete(PG_FUNCTION_ARGS)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
Relation rel = (Relation) PG_GETARG_POINTER(0);
|
|
|
|
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(1);
|
|
|
|
void *callback_state = (void *) PG_GETARG_POINTER(2);
|
|
|
|
IndexBulkDeleteResult *result;
|
2001-10-25 05:50:21 +00:00
|
|
|
BlockNumber num_pages;
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
double tuples_removed;
|
|
|
|
double num_index_tuples;
|
|
|
|
IndexScanDesc iscan;
|
|
|
|
|
|
|
|
tuples_removed = 0;
|
|
|
|
num_index_tuples = 0;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
1998-12-15 12:47:01 +00:00
|
|
|
/*
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
* Since rtree is not marked "amconcurrent" in pg_am, caller should
|
2001-10-25 05:50:21 +00:00
|
|
|
* have acquired exclusive lock on index relation. We need no locking
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
* here.
|
1998-12-15 12:47:01 +00:00
|
|
|
*/
|
1997-09-07 05:04:48 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/*
|
|
|
|
* XXX generic implementation --- should be improved!
|
|
|
|
*/
|
1997-09-07 05:04:48 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/* walk through the entire index */
|
2002-05-20 23:51:44 +00:00
|
|
|
iscan = index_beginscan(NULL, rel, SnapshotAny, 0, (ScanKey) NULL);
|
2002-05-24 18:57:57 +00:00
|
|
|
/* including killed tuples */
|
|
|
|
iscan->ignore_killed_tuples = false;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2002-05-20 23:51:44 +00:00
|
|
|
while (index_getnext_indexitem(iscan, ForwardScanDirection))
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
{
|
2002-05-20 23:51:44 +00:00
|
|
|
if (callback(&iscan->xs_ctup.t_self, callback_state))
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
{
|
2002-05-20 23:51:44 +00:00
|
|
|
ItemPointerData indextup = iscan->currentItemData;
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
BlockNumber blkno;
|
|
|
|
OffsetNumber offnum;
|
|
|
|
Buffer buf;
|
|
|
|
Page page;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
2002-05-20 23:51:44 +00:00
|
|
|
blkno = ItemPointerGetBlockNumber(&indextup);
|
|
|
|
offnum = ItemPointerGetOffsetNumber(&indextup);
|
1997-09-07 05:04:48 +00:00
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
/* adjust any scans that will be affected by this deletion */
|
|
|
|
/* (namely, my own scan) */
|
|
|
|
rtadjscans(rel, RTOP_DEL, blkno, offnum);
|
|
|
|
|
|
|
|
/* delete the index tuple */
|
|
|
|
buf = ReadBuffer(rel, blkno);
|
|
|
|
page = BufferGetPage(buf);
|
|
|
|
|
|
|
|
PageIndexTupleDelete(page, offnum);
|
|
|
|
|
|
|
|
WriteBuffer(buf);
|
|
|
|
|
|
|
|
tuples_removed += 1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
num_index_tuples += 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
index_endscan(iscan);
|
|
|
|
|
|
|
|
/* return statistics */
|
|
|
|
num_pages = RelationGetNumberOfBlocks(rel);
|
|
|
|
|
2003-02-24 00:57:17 +00:00
|
|
|
result = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
result->num_pages = num_pages;
|
|
|
|
result->num_index_tuples = num_index_tuples;
|
2003-02-22 00:45:05 +00:00
|
|
|
result->tuples_removed = tuples_removed;
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
|
|
|
|
PG_RETURN_POINTER(result);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-15 22:48:19 +00:00
|
|
|
|
1997-09-07 05:04:48 +00:00
|
|
|
static void
|
1997-09-08 21:56:23 +00:00
|
|
|
initRtstate(RTSTATE *rtstate, Relation index)
|
1996-07-09 06:22:35 +00:00
|
|
|
{
|
2001-10-06 23:21:45 +00:00
|
|
|
fmgr_info_copy(&rtstate->unionFn,
|
|
|
|
index_getprocinfo(index, 1, RT_UNION_PROC),
|
|
|
|
CurrentMemoryContext);
|
|
|
|
fmgr_info_copy(&rtstate->sizeFn,
|
|
|
|
index_getprocinfo(index, 1, RT_SIZE_PROC),
|
|
|
|
CurrentMemoryContext);
|
|
|
|
fmgr_info_copy(&rtstate->interFn,
|
|
|
|
index_getprocinfo(index, 1, RT_INTER_PROC),
|
|
|
|
CurrentMemoryContext);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
|
2001-09-29 03:46:12 +00:00
|
|
|
/* for sorting SPLITCOST records in descending order */
|
|
|
|
static int
|
|
|
|
qsort_comp_splitcost(const void *a, const void *b)
|
|
|
|
{
|
2001-10-25 05:50:21 +00:00
|
|
|
float diff =
|
|
|
|
((SPLITCOST *) a)->cost_differential -
|
|
|
|
((SPLITCOST *) b)->cost_differential;
|
|
|
|
|
2001-09-29 03:46:12 +00:00
|
|
|
if (diff < 0)
|
|
|
|
return 1;
|
|
|
|
else if (diff > 0)
|
|
|
|
return -1;
|
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
1996-07-09 06:22:35 +00:00
|
|
|
#ifdef RTDEBUG
|
|
|
|
|
|
|
|
void
|
|
|
|
_rtdump(Relation r)
|
|
|
|
{
|
1997-09-08 02:41:22 +00:00
|
|
|
Buffer buf;
|
|
|
|
Page page;
|
|
|
|
OffsetNumber offnum,
|
|
|
|
maxoff;
|
|
|
|
BlockNumber blkno;
|
|
|
|
BlockNumber nblocks;
|
1997-09-07 05:04:48 +00:00
|
|
|
RTreePageOpaque po;
|
1997-09-08 02:41:22 +00:00
|
|
|
IndexTuple itup;
|
|
|
|
BlockNumber itblkno;
|
|
|
|
OffsetNumber itoffno;
|
2001-03-07 21:20:26 +00:00
|
|
|
Datum datum;
|
1997-09-08 02:41:22 +00:00
|
|
|
char *itkey;
|
1997-09-07 05:04:48 +00:00
|
|
|
|
|
|
|
nblocks = RelationGetNumberOfBlocks(r);
|
|
|
|
for (blkno = 0; blkno < nblocks; blkno++)
|
|
|
|
{
|
|
|
|
buf = ReadBuffer(r, blkno);
|
|
|
|
page = BufferGetPage(buf);
|
|
|
|
po = (RTreePageOpaque) PageGetSpecialPointer(page);
|
|
|
|
maxoff = PageGetMaxOffsetNumber(page);
|
|
|
|
printf("Page %d maxoff %d <%s>\n", blkno, maxoff,
|
|
|
|
(po->flags & F_LEAF ? "LEAF" : "INTERNAL"));
|
|
|
|
|
|
|
|
if (PageIsEmpty(page))
|
|
|
|
{
|
|
|
|
ReleaseBuffer(buf);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (offnum = FirstOffsetNumber;
|
|
|
|
offnum <= maxoff;
|
|
|
|
offnum = OffsetNumberNext(offnum))
|
|
|
|
{
|
|
|
|
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
|
|
|
|
itblkno = ItemPointerGetBlockNumber(&(itup->t_tid));
|
|
|
|
itoffno = ItemPointerGetOffsetNumber(&(itup->t_tid));
|
2001-03-07 21:20:26 +00:00
|
|
|
datum = IndexTupleGetDatum(itup);
|
2000-07-30 20:44:02 +00:00
|
|
|
itkey = DatumGetCString(DirectFunctionCall1(box_out,
|
2001-03-07 21:20:26 +00:00
|
|
|
datum));
|
1997-09-07 05:04:48 +00:00
|
|
|
printf("\t[%d] size %d heap <%d,%d> key:%s\n",
|
|
|
|
offnum, IndexTupleSize(itup), itblkno, itoffno, itkey);
|
|
|
|
pfree(itkey);
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseBuffer(buf);
|
1996-07-09 06:22:35 +00:00
|
|
|
}
|
|
|
|
}
|
2001-11-05 17:46:40 +00:00
|
|
|
#endif /* defined RTDEBUG */
|
2000-10-21 15:43:36 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
rtree_redo(XLogRecPtr lsn, XLogRecord *record)
|
|
|
|
{
|
Commit to match discussed elog() changes. Only update is that LOG is
now just below FATAL in server_min_messages. Added more text to
highlight ordering difference between it and client_min_messages.
---------------------------------------------------------------------------
REALLYFATAL => PANIC
STOP => PANIC
New INFO level the prints to client by default
New LOG level the prints to server log by default
Cause VACUUM information to print only to the client
NOTICE => INFO where purely information messages are sent
DEBUG => LOG for purely server status messages
DEBUG removed, kept as backward compatible
DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1 added
DebugLvl removed in favor of new DEBUG[1-5] symbols
New server_min_messages GUC parameter with values:
DEBUG[5-1], INFO, NOTICE, ERROR, LOG, FATAL, PANIC
New client_min_messages GUC parameter with values:
DEBUG[5-1], LOG, INFO, NOTICE, ERROR, FATAL, PANIC
Server startup now logged with LOG instead of DEBUG
Remove debug_level GUC parameter
elog() numbers now start at 10
Add test to print error message if older elog() values are passed to elog()
Bootstrap mode now has a -d that requires an argument, like postmaster
2002-03-02 21:39:36 +00:00
|
|
|
elog(PANIC, "rtree_redo: unimplemented");
|
2000-10-21 15:43:36 +00:00
|
|
|
}
|
2001-03-22 04:01:46 +00:00
|
|
|
|
2000-10-21 15:43:36 +00:00
|
|
|
void
|
|
|
|
rtree_undo(XLogRecPtr lsn, XLogRecord *record)
|
|
|
|
{
|
Commit to match discussed elog() changes. Only update is that LOG is
now just below FATAL in server_min_messages. Added more text to
highlight ordering difference between it and client_min_messages.
---------------------------------------------------------------------------
REALLYFATAL => PANIC
STOP => PANIC
New INFO level the prints to client by default
New LOG level the prints to server log by default
Cause VACUUM information to print only to the client
NOTICE => INFO where purely information messages are sent
DEBUG => LOG for purely server status messages
DEBUG removed, kept as backward compatible
DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1 added
DebugLvl removed in favor of new DEBUG[1-5] symbols
New server_min_messages GUC parameter with values:
DEBUG[5-1], INFO, NOTICE, ERROR, LOG, FATAL, PANIC
New client_min_messages GUC parameter with values:
DEBUG[5-1], LOG, INFO, NOTICE, ERROR, FATAL, PANIC
Server startup now logged with LOG instead of DEBUG
Remove debug_level GUC parameter
elog() numbers now start at 10
Add test to print error message if older elog() values are passed to elog()
Bootstrap mode now has a -d that requires an argument, like postmaster
2002-03-02 21:39:36 +00:00
|
|
|
elog(PANIC, "rtree_undo: unimplemented");
|
2000-10-21 15:43:36 +00:00
|
|
|
}
|
2001-03-22 04:01:46 +00:00
|
|
|
|
2000-10-21 15:43:36 +00:00
|
|
|
void
|
2001-03-22 04:01:46 +00:00
|
|
|
rtree_desc(char *buf, uint8 xl_info, char *rec)
|
2000-10-21 15:43:36 +00:00
|
|
|
{
|
|
|
|
}
|