/*-------------------------------------------------------------------------
 *
 * hashpage.c
 *    Hash table page management code for the Postgres hash access method
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/hash/hashpage.c
 *
 * NOTES
 *    Postgres hash pages look like ordinary relation pages.  The opaque
 *    data at high addresses includes information about the page including
 *    whether a page is an overflow page or a true bucket, the bucket
 *    number, and the block numbers of the preceding and following pages
 *    in the same bucket.
 *
 *    The first page in a hash relation, page zero, is special -- it stores
 *    information describing the hash table; it is referred to as the
 *    "meta page."  Pages one and higher store the actual data.
 *
 *    There are also bitmap pages, which are not manipulated here;
 *    see hashovfl.c.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/hash.h"
#include "access/hash_xlog.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/smgr.h"


static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
                                uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
                              Bucket obucket, Bucket nbucket,
                              Buffer obuf,
                              Buffer nbuf,
                              HTAB *htab,
                              uint32 maxbucket,
                              uint32 highmask, uint32 lowmask);
static void log_split_page(Relation rel, Buffer buf);


/*
 *  _hash_getbuf() -- Get a buffer by block number for read or write.
 *
 *      'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
 *      'flags' is a bitwise OR of the allowed page types.
 *
 *      This must be used only to fetch pages that are expected to be valid
 *      already.  _hash_checkpage() is applied using the given flags.
 *
 *      When this routine returns, the appropriate lock is set on the
 *      requested buffer and its reference count has been incremented
 *      (ie, the buffer is "locked and pinned").
 *
 *      P_NEW is disallowed because this routine can only be used
 *      to access pages that are known to be before the filesystem EOF.
 *      Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBuffer(rel, blkno);

    if (access != HASH_NOLOCK)
        LockBuffer(buf, access);

    /* ref count and lock type are correct */

    _hash_checkpage(rel, buf, flags);

    return buf;
}

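/*
 * A typical call sequence (illustrative sketch; the block number is assumed
 * to have come from a prior lookup, since this routine never extends the
 * relation):
 *
 *      buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
 *      page = BufferGetPage(buf);
 *      ... examine the page ...
 *      _hash_relbuf(rel, buf);
 */
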
/*
 *  _hash_getbuf_with_condlock_cleanup() -- Try to get a buffer for cleanup.
 *
 *      We read the page and try to acquire a cleanup lock.  If we get it,
 *      we return the buffer; otherwise, we return InvalidBuffer.
 */
Buffer
_hash_getbuf_with_condlock_cleanup(Relation rel, BlockNumber blkno, int flags)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBuffer(rel, blkno);

    if (!ConditionalLockBufferForCleanup(buf))
    {
        ReleaseBuffer(buf);
        return InvalidBuffer;
    }

    /* ref count and lock type are correct */

    _hash_checkpage(rel, buf, flags);

    return buf;
}

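/*
 * Typical caller pattern (illustrative sketch): the split code conditionally
 * takes the cleanup lock on the primary bucket page and simply gives up on
 * splitting that bucket if the lock is not immediately available, e.g.
 *
 *      buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno,
 *                                                       LH_BUCKET_PAGE);
 *      if (!BufferIsValid(buf_oblkno))
 *          return;     (someone else is busy with this bucket)
 *
 * The names here match variables declared later in _hash_expandtable().
 */
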
/*
 *  _hash_getinitbuf() -- Get and initialize a buffer by block number.
 *
 *      This must be used only to fetch pages that are known to be before
 *      the index's filesystem EOF, but are to be filled from scratch.
 *      _hash_pageinit() is applied automatically.  Otherwise it has
 *      effects similar to _hash_getbuf() with access = HASH_WRITE.
 *
 *      When this routine returns, a write lock is set on the
 *      requested buffer and its reference count has been incremented
 *      (ie, the buffer is "locked and pinned").
 *
 *      P_NEW is disallowed because this routine can only be used
 *      to access pages that are known to be before the filesystem EOF.
 *      Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getinitbuf(Relation rel, BlockNumber blkno)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_AND_LOCK,
                             NULL);

    /* ref count and lock type are correct */

    /* initialize the page */
    _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

    return buf;
}

/*
 *  _hash_initbuf() -- Get and initialize a buffer by bucket number.
 */
void
_hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
              bool initpage)
{
    HashPageOpaque pageopaque;
    Page        page;

    page = BufferGetPage(buf);

    /* initialize the page */
    if (initpage)
        _hash_pageinit(page, BufferGetPageSize(buf));

    pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);

    /*
     * Set hasho_prevblkno with current hashm_maxbucket. This value will be
     * used to validate cached HashMetaPageData. See
     * _hash_getbucketbuf_from_hashkey().
     */
    pageopaque->hasho_prevblkno = max_bucket;
    pageopaque->hasho_nextblkno = InvalidBlockNumber;
    pageopaque->hasho_bucket = num_bucket;
    pageopaque->hasho_flag = flag;
    pageopaque->hasho_page_id = HASHO_PAGE_ID;
}

/*
 *  _hash_getnewbuf() -- Get a new page at the end of the index.
 *
 *      This has the same API as _hash_getinitbuf, except that we are adding
 *      a page to the index, and hence expect the page to be past the
 *      logical EOF.  (However, we have to support the case where it isn't,
 *      since a prior try might have crashed after extending the filesystem
 *      EOF but before updating the metapage to reflect the added page.)
 *
 *      It is caller's responsibility to ensure that only one process can
 *      extend the index at a time.  In practice, this function is called
 *      only while holding write lock on the metapage, because adding a page
 *      is always associated with an update of metapage data.
 */
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno, ForkNumber forkNum)
{
    BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");
    if (blkno > nblocks)
        elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
             RelationGetRelationName(rel));

    /* smgr insists we use P_NEW to extend the relation */
    if (blkno == nblocks)
    {
        buf = ReadBufferExtended(rel, forkNum, P_NEW, RBM_NORMAL, NULL);
        if (BufferGetBlockNumber(buf) != blkno)
            elog(ERROR, "unexpected hash relation size: %u, should be %u",
                 BufferGetBlockNumber(buf), blkno);
        LockBuffer(buf, HASH_WRITE);
    }
    else
    {
        buf = ReadBufferExtended(rel, forkNum, blkno, RBM_ZERO_AND_LOCK,
                                 NULL);
    }

    /* ref count and lock type are correct */

    /* initialize the page */
    _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

    return buf;
}

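/*
 * Illustrative call (sketch): the bucket-creation loop in _hash_init() below
 * extends the index one page at a time this way, with the block number
 * derived from the metapage:
 *
 *      blkno = BUCKET_TO_BLKNO(metap, i);
 *      buf = _hash_getnewbuf(rel, blkno, forkNum);
 *
 * which is safe there because index creation is single-threaded.
 */
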
/*
 *  _hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
 *
 *      This is identical to _hash_getbuf() but also allows a buffer access
 *      strategy to be specified.  We use this for VACUUM operations.
 */
Buffer
_hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
                           int access, int flags,
                           BufferAccessStrategy bstrategy)
{
    Buffer      buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);

    if (access != HASH_NOLOCK)
        LockBuffer(buf, access);

    /* ref count and lock type are correct */

    _hash_checkpage(rel, buf, flags);

    return buf;
}

/*
 *  _hash_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_hash_relbuf(Relation rel, Buffer buf)
{
    UnlockReleaseBuffer(buf);
}

/*
 *  _hash_dropbuf() -- release an unlocked buffer.
 *
 * This is used to unpin a buffer on which we hold no lock.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
    ReleaseBuffer(buf);
}

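/*
 * A common pairing (sketch): a page obtained with _hash_getbuf() is given
 * back with _hash_relbuf(), which drops both lock and pin; a buffer whose
 * lock has already been released but whose pin is still held is given back
 * with _hash_dropbuf(), which drops only the pin.
 */
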
/*
 *  _hash_dropscanbuf() -- release buffers used in scan.
 *
 * This routine unpins the buffers used during scan on which we
 * hold no lock.
 */
void
_hash_dropscanbuf(Relation rel, HashScanOpaque so)
{
    /* release pin we hold on primary bucket page */
    if (BufferIsValid(so->hashso_bucket_buf) &&
        so->hashso_bucket_buf != so->currPos.buf)
        _hash_dropbuf(rel, so->hashso_bucket_buf);
    so->hashso_bucket_buf = InvalidBuffer;

    /* release pin we hold on primary bucket page of bucket being split */
    if (BufferIsValid(so->hashso_split_bucket_buf) &&
        so->hashso_split_bucket_buf != so->currPos.buf)
        _hash_dropbuf(rel, so->hashso_split_bucket_buf);
    so->hashso_split_bucket_buf = InvalidBuffer;

    /* release any pin we still hold */
    if (BufferIsValid(so->currPos.buf))
        _hash_dropbuf(rel, so->currPos.buf);
    so->currPos.buf = InvalidBuffer;

    /* reset split scan */
    so->hashso_buc_populated = false;
    so->hashso_buc_split = false;
}

/*
 *  _hash_init() -- Initialize the metadata page of a hash index,
 *              the initial buckets, and the initial bitmap page.
 *
 * The initial number of buckets is dependent on num_tuples, an estimate
 * of the number of tuples to be loaded into the index initially.  The
 * chosen number of buckets is returned.
 *
 * We are fairly cavalier about locking here, since we know that no one else
 * could be accessing this index.  In particular the rule about not holding
 * multiple buffer locks is ignored.
 */
uint32
_hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
{
    Buffer      metabuf;
    Buffer      buf;
    Buffer      bitmapbuf;
    Page        pg;
    HashMetaPage metap;
    RegProcedure procid;
    int32       data_width;
    int32       item_width;
    int32       ffactor;
    uint32      num_buckets;
    uint32      i;
    bool        use_wal;

    /* safety check */
    if (RelationGetNumberOfBlocksInFork(rel, forkNum) != 0)
        elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
             RelationGetRelationName(rel));

    /*
     * WAL log creation of pages if the relation is persistent, or this is the
     * init fork.  Init forks for unlogged relations always need to be WAL
     * logged.
     */
    use_wal = RelationNeedsWAL(rel) || forkNum == INIT_FORKNUM;

    /*
     * Determine the target fill factor (in tuples per bucket) for this index.
     * The idea is to make the fill factor correspond to pages about as full
     * as the user-settable fillfactor parameter says.  We can compute it
     * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
     */
    data_width = sizeof(uint32);
    item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
        sizeof(ItemIdData);     /* include the line pointer */
    ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
    /* keep to a sane range */
    if (ffactor < 10)
        ffactor = 10;

    procid = index_getprocid(rel, 1, HASHSTANDARD_PROC);

    /*
     * We initialize the metapage, the first N bucket pages, and the first
     * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
     * calls to occur.  This ensures that the smgr level has the right idea of
     * the physical index length.
     *
     * Critical section not required, because on error the creation of the
     * whole relation will be rolled back.
     */
    metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
    _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
    MarkBufferDirty(metabuf);

    pg = BufferGetPage(metabuf);
    metap = HashPageGetMeta(pg);

    /* XLOG stuff */
    if (use_wal)
    {
        xl_hash_init_meta_page xlrec;
        XLogRecPtr  recptr;

        xlrec.num_tuples = num_tuples;
        xlrec.procid = metap->hashm_procid;
        xlrec.ffactor = metap->hashm_ffactor;

        XLogBeginInsert();
        XLogRegisterData((char *) &xlrec, SizeOfHashInitMetaPage);
        XLogRegisterBuffer(0, metabuf, REGBUF_WILL_INIT | REGBUF_STANDARD);

        recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_META_PAGE);

        PageSetLSN(BufferGetPage(metabuf), recptr);
    }

    num_buckets = metap->hashm_maxbucket + 1;

    /*
     * Release buffer lock on the metapage while we initialize buckets.
     * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
     * won't accomplish anything.  It's a bad idea to hold buffer locks for
     * long intervals in any case, since that can block the bgwriter.
     */
    LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

    /*
     * Initialize and WAL Log the first N buckets
     */
    for (i = 0; i < num_buckets; i++)
    {
        BlockNumber blkno;

        /* Allow interrupts, in case N is huge */
        CHECK_FOR_INTERRUPTS();

        blkno = BUCKET_TO_BLKNO(metap, i);
        buf = _hash_getnewbuf(rel, blkno, forkNum);
        _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
        MarkBufferDirty(buf);

        if (use_wal)
            log_newpage(&rel->rd_node,
                        forkNum,
                        blkno,
                        BufferGetPage(buf),
                        true);
        _hash_relbuf(rel, buf);
    }

    /* Now reacquire buffer lock on metapage */
    LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);

    /*
     * Initialize bitmap page
     */
    bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
    _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
    MarkBufferDirty(bitmapbuf);

    /* add the new bitmap page to the metapage's list of bitmaps */
    /* metapage already has a write lock */
    if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("out of overflow pages in hash index \"%s\"",
                        RelationGetRelationName(rel))));

    metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;

    metap->hashm_nmaps++;
    MarkBufferDirty(metabuf);

    /* XLOG stuff */
    if (use_wal)
    {
        xl_hash_init_bitmap_page xlrec;
        XLogRecPtr  recptr;

        xlrec.bmsize = metap->hashm_bmsize;

        XLogBeginInsert();
        XLogRegisterData((char *) &xlrec, SizeOfHashInitBitmapPage);
        XLogRegisterBuffer(0, bitmapbuf, REGBUF_WILL_INIT);

        /*
         * This is safe only because nobody else can be modifying the index at
         * this stage; it's only visible to the transaction that is creating
         * it.
         */
        XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);

        recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_INIT_BITMAP_PAGE);

        PageSetLSN(BufferGetPage(bitmapbuf), recptr);
        PageSetLSN(BufferGetPage(metabuf), recptr);
    }

    /* all done */
    _hash_relbuf(rel, bitmapbuf);
    _hash_relbuf(rel, metabuf);

    return num_buckets;
}

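/*
 * Worked example of the fill-factor arithmetic above (a sketch; the exact
 * values depend on the platform's alignment rules and on BLCKSZ): on a
 * typical 64-bit build with 8 kB pages, item_width is
 *
 *      MAXALIGN(8) + MAXALIGN(4) + 4  =  8 + 8 + 4  =  20 bytes,
 *
 * and with the default hash fillfactor of 75 the target page usage is about
 * 8192 * 0.75 = 6144 bytes, so ffactor comes out to roughly 6144 / 20 = 307
 * tuples per bucket.  num_tuples / ffactor then drives the initial bucket
 * count chosen in _hash_init_metabuffer().
 */
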
/*
 *  _hash_init_metabuffer() -- Initialize the metadata page of a hash index.
 */
void
_hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
                      uint16 ffactor, bool initpage)
{
    HashMetaPage metap;
    HashPageOpaque pageopaque;
    Page        page;
    double      dnumbuckets;
    uint32      num_buckets;
    uint32      spare_index;
    uint32      i;

    /*
     * Choose the number of initial bucket pages to match the fill factor
     * given the estimated number of tuples.  We round up the result to the
     * total number of buckets which has to be allocated before using its
     * hashm_spares element. However always force at least 2 bucket pages. The
     * upper limit is determined by considerations explained in
     * _hash_expandtable().
     */
    dnumbuckets = num_tuples / ffactor;
    if (dnumbuckets <= 2.0)
        num_buckets = 2;
    else if (dnumbuckets >= (double) 0x40000000)
        num_buckets = 0x40000000;
    else
        num_buckets = _hash_get_totalbuckets(_hash_spareindex(dnumbuckets));

    spare_index = _hash_spareindex(num_buckets);
    Assert(spare_index < HASH_MAX_SPLITPOINTS);

    page = BufferGetPage(buf);
    if (initpage)
        _hash_pageinit(page, BufferGetPageSize(buf));

    pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
    pageopaque->hasho_prevblkno = InvalidBlockNumber;
    pageopaque->hasho_nextblkno = InvalidBlockNumber;
    pageopaque->hasho_bucket = -1;
    pageopaque->hasho_flag = LH_META_PAGE;
    pageopaque->hasho_page_id = HASHO_PAGE_ID;

    metap = HashPageGetMeta(page);

    metap->hashm_magic = HASH_MAGIC;
    metap->hashm_version = HASH_VERSION;
    metap->hashm_ntuples = 0;
    metap->hashm_nmaps = 0;
    metap->hashm_ffactor = ffactor;
    metap->hashm_bsize = HashGetMaxBitmapSize(page);
    /* find largest bitmap array size that will fit in page size */
    for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
    {
        if ((1 << i) <= metap->hashm_bsize)
            break;
    }
    Assert(i > 0);
    metap->hashm_bmsize = 1 << i;
    metap->hashm_bmshift = i + BYTE_TO_BIT;
    Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));

    /*
     * Label the index with its primary hash support function's OID.  This is
     * pretty useless for normal operation (in fact, hashm_procid is not used
     * anywhere), but it might be handy for forensic purposes so we keep it.
     */
    metap->hashm_procid = procid;

    /*
     * We initialize the index with N buckets, 0 .. N-1, occupying physical
     * blocks 1 to N.  The first freespace bitmap page is in block N+1.
     */
    metap->hashm_maxbucket = num_buckets - 1;

    /*
     * Set highmask as next immediate ((2 ^ x) - 1), which should be
     * sufficient to cover num_buckets.
     */
    metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
    metap->hashm_lowmask = (metap->hashm_highmask >> 1);

    MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
    MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

    /* Set up mapping for one spare page after the initial splitpoints */
    metap->hashm_spares[spare_index] = 1;
    metap->hashm_ovflpoint = spare_index;
    metap->hashm_firstfree = 0;

    /*
     * Set pd_lower just past the end of the metadata.  This is essential,
     * because without doing so, metadata will be lost if xlog.c compresses
     * the page.
     */
    ((PageHeader) page)->pd_lower =
        ((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
}

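/*
 * Worked example for the mask setup above (illustrative): with
 * num_buckets = 4, hashm_maxbucket is 3, and _hash_log2(5) is 3 (the
 * smallest power of two >= 5 is 8), so hashm_highmask becomes 7 and
 * hashm_lowmask 3.  A hash value is then mapped to a bucket as in
 * _hash_hashkey2bucket(): mask with highmask, and if the result exceeds
 * maxbucket, mask with lowmask instead.
 */
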
/*
 *  _hash_pageinit() -- Initialize a new hash index page.
 */
void
_hash_pageinit(Page page, Size size)
{
    PageInit(page, size, sizeof(HashPageOpaqueData));
}

2003-09-01 22:26:34 +02:00
|
|
|
/*
|
2003-09-05 00:06:27 +02:00
|
|
|
* Attempt to expand the hash table by creating one new bucket.
|
|
|
|
*
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which is they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
* This will silently do nothing if we don't get cleanup lock on old or
|
|
|
|
* new bucket.
|
2003-09-05 00:06:27 +02:00
|
|
|
*
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which is they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
* Complete the pending splits and remove the tuples from old bucket,
|
|
|
|
* if there are any left over from the previous split.
|
2003-09-05 00:06:27 +02:00
|
|
|
*
|
|
|
|
* The caller must hold a pin, but no lock, on the metapage buffer.
|
|
|
|
* The buffer is returned in the same state.
|
2003-09-01 22:26:34 +02:00
|
|
|
*/
|
1996-07-09 08:22:35 +02:00
|
|
|
void
|
|
|
|
_hash_expandtable(Relation rel, Buffer metabuf)
|
|
|
|
{
|
1997-09-08 04:41:22 +02:00
|
|
|
HashMetaPage metap;
|
|
|
|
Bucket old_bucket;
|
|
|
|
Bucket new_bucket;
|
|
|
|
uint32 spare_ndx;
|
2003-09-05 00:06:27 +02:00
|
|
|
BlockNumber start_oblkno;
|
|
|
|
BlockNumber start_nblkno;
|
2015-03-30 22:40:05 +02:00
|
|
|
Buffer buf_nblkno;
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which is they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
Buffer buf_oblkno;
|
|
|
|
Page opage;
|
2017-03-01 10:13:38 +01:00
|
|
|
Page npage;
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
HashPageOpaque oopaque;
|
2017-03-01 10:13:38 +01:00
|
|
|
HashPageOpaque nopaque;
|
2003-09-05 00:06:27 +02:00
|
|
|
uint32 maxbucket;
|
|
|
|
uint32 highmask;
|
|
|
|
uint32 lowmask;
|
2017-03-14 18:27:02 +01:00
|
|
|
bool metap_update_masks = false;
|
|
|
|
bool metap_update_splitpoint = false;
|
2003-09-05 00:06:27 +02:00
|
|
|
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
restart_expand:
|
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
/*
|
Reduce use of heavyweight locking inside hash AM.
Avoid using LockPage(rel, 0, lockmode) to protect against changes to
the bucket mapping. Instead, an exclusive buffer content lock is now
viewed as sufficient permission to modify the metapage, and a shared
buffer content lock is used when such modifications need to be
prevented. This more relaxed locking regimen makes it possible that,
when we're busy getting a heavyweight lock on the bucket we intend
to search or insert into, a bucket split might occur underneath us.
To compensate for that possibility, we use a loop-and-retry system:
release the metapage content lock, acquire the heavyweight lock on the
target bucket, and then reacquire the metapage content lock and check
that the bucket mapping has not changed. Normally it hasn't, and
we're done. But if by chance it has, we simply unlock the metapage,
release the heavyweight lock we acquired previously, lock the new
bucket, and loop around again. Even in the worst case we cannot loop
very many times here, since we don't split the same bucket again until
we've split all the other buckets, and 2^N gets big pretty fast.
This results in greatly improved concurrency, because we're
effectively replacing two lwlock acquire-and-release cycles in
exclusive mode (on one of the lock manager locks) with a single
acquire-and-release cycle in shared mode (on the metapage buffer
content lock). Testing shows that it's still not quite as good as
btree; for that, we'd probably have to find some way of getting rid
of the heavyweight bucket locks as well, which does not appear
straightforward.
Patch by me, review by Jeff Janes.
2012-06-26 12:56:10 +02:00
|
|
|
* Write-lock the meta page. It used to be necessary to acquire a
|
|
|
|
* heavyweight lock to begin a split, but that is no longer required.
|
2003-09-05 00:06:27 +02:00
|
|
|
*/
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
|
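The loop-and-retry protocol described in the commit message above is used by
readers and inserters elsewhere in the hash AM, not by this function.  The
following is an illustrative sketch only, with hypothetical local variables;
the real search/insert code differs in detail (and, at the time of that
commit, still took heavyweight bucket locks):

    for (;;)
    {
        /* compute the target bucket under a shared lock on the metapage */
        LockBuffer(metabuf, BUFFER_LOCK_SHARE);
        bucket = _hash_hashkey2bucket(hashkey, metap->hashm_maxbucket,
                                      metap->hashm_highmask,
                                      metap->hashm_lowmask);
        blkno = BUCKET_TO_BLKNO(metap, bucket);
        LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);

        /* grab the target bucket, then recheck the mapping */
        bucket_buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
        LockBuffer(metabuf, BUFFER_LOCK_SHARE);
        moved = (bucket != _hash_hashkey2bucket(hashkey,
                                                metap->hashm_maxbucket,
                                                metap->hashm_highmask,
                                                metap->hashm_lowmask));
        LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
        if (!moved)
            break;                      /* mapping unchanged; we're done */
        _hash_relbuf(rel, bucket_buf);  /* a split moved our key; retry */
    }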
1997-09-07 07:04:48 +02:00
|
|
|
|
2005-11-06 20:29:01 +01:00
|
|
|
_hash_checkpage(rel, metabuf, LH_META_PAGE);
|
2016-04-20 15:31:19 +02:00
|
|
|
metap = HashPageGetMeta(BufferGetPage(metabuf));
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Check to see if split is still needed; someone else might have already
|
|
|
|
* done one while we waited for the lock.
|
2003-09-05 00:06:27 +02:00
|
|
|
*
|
2005-05-10 07:15:07 +02:00
|
|
|
* Make sure this stays in sync with _hash_doinsert()
|
2003-09-05 00:06:27 +02:00
|
|
|
*/
|
|
|
|
if (metap->hashm_ntuples <=
|
|
|
|
(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
|
|
|
|
goto fail;
|
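For reference, the mirrored test on the insert side looks roughly like the
following (a sketch of the check in _hash_doinsert() referenced above; the
real function wraps it in additional conditions):

    /* in _hash_doinsert(), roughly: request a split once over the fill factor */
    if (metap->hashm_ntuples >
        (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
        _hash_expandtable(rel, metabuf);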
2003-09-01 22:26:34 +02:00
|
|
|
|
2006-11-19 22:33:23 +01:00
|
|
|
/*
|
2007-11-15 22:14:46 +01:00
|
|
|
* Can't split anymore if maxbucket has reached its maximum possible
|
|
|
|
* value.
|
2006-11-19 22:33:23 +01:00
|
|
|
*
|
|
|
|
* Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
|
|
|
|
* the calculation maxbucket+1 mustn't overflow). Currently we restrict
|
|
|
|
* to half that because of overflow looping in _hash_log2() and
|
|
|
|
* insufficient space in hashm_spares[]. It's moot anyway because an
|
2007-11-15 22:14:46 +01:00
|
|
|
* index with 2^32 buckets would certainly overflow BlockNumber and hence
|
|
|
|
* _hash_alloc_buckets() would fail, but if we supported buckets smaller
|
|
|
|
* than a disk block then this would be an independent constraint.
|
2008-03-15 21:46:31 +01:00
|
|
|
*
|
2009-06-11 16:49:15 +02:00
|
|
|
* If you change this, see also the maximum initial number of buckets in
|
2017-03-07 23:03:51 +01:00
|
|
|
* _hash_init().
|
2006-11-19 22:33:23 +01:00
|
|
|
*/
|
|
|
|
if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
|
|
|
|
goto fail;
|
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
/*
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
* Determine which bucket is to be split, and attempt to take cleanup lock
|
|
|
|
* on the old bucket. If we can't get the lock, give up.
|
|
|
|
*
|
|
|
|
* The cleanup lock protects us not only against other backends, but
|
|
|
|
* against our own backend as well.
|
2003-09-05 00:06:27 +02:00
|
|
|
*
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
* The cleanup lock is mainly to protect the split from concurrent
|
|
|
|
* inserts. See src/backend/access/hash/README, Lock Definitions for
|
|
|
|
* further details. Due to this locking restriction, if there is any
|
|
|
|
* pending scan, the split will give up, which is not good, but harmless.
|
2003-09-05 00:06:27 +02:00
|
|
|
*/
|
2007-04-19 22:24:04 +02:00
|
|
|
new_bucket = metap->hashm_maxbucket + 1;
|
|
|
|
|
2003-09-01 22:26:34 +02:00
|
|
|
old_bucket = (new_bucket & metap->hashm_lowmask);
|
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
|
|
|
|
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
buf_oblkno = _hash_getbuf_with_condlock_cleanup(rel, start_oblkno, LH_BUCKET_PAGE);
|
|
|
|
if (!buf_oblkno)
|
2003-09-05 00:06:27 +02:00
|
|
|
goto fail;
|
|
|
|
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
opage = BufferGetPage(buf_oblkno);
|
|
|
|
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We want to finish the split from this bucket; there is no apparent
|
|
|
|
* benefit in postponing it, and finishing a split that involves multiple
|
|
|
|
* buckets (in case a new split were to fail as well) would complicate the
|
|
|
|
* code.  We need not consider the new bucket for completing the split
|
|
|
|
* here, because a re-split of the new bucket cannot start while there is
|
|
|
|
* still a pending split from the old bucket.
|
|
|
|
*/
|
|
|
|
if (H_BUCKET_BEING_SPLIT(oopaque))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Copy bucket mapping info now; refer to the comment in code below where
|
|
|
|
* we copy this information before calling _hash_splitbucket to see
|
|
|
|
* why this is okay.
|
|
|
|
*/
|
|
|
|
maxbucket = metap->hashm_maxbucket;
|
|
|
|
highmask = metap->hashm_highmask;
|
|
|
|
lowmask = metap->hashm_lowmask;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the lock on metapage and old_bucket, before completing the
|
|
|
|
* split.
|
|
|
|
*/
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
|
|
|
|
LockBuffer(buf_oblkno, BUFFER_LOCK_UNLOCK);
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
|
|
|
|
_hash_finish_split(rel, metabuf, buf_oblkno, old_bucket, maxbucket,
|
|
|
|
highmask, lowmask);
|
|
|
|
|
|
|
|
/* release the pin on old buffer and retry for expand. */
|
|
|
|
_hash_dropbuf(rel, buf_oblkno);
|
|
|
|
|
|
|
|
goto restart_expand;
|
|
|
|
}
|
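For reference, the H_BUCKET_BEING_SPLIT() and H_NEEDS_SPLIT_CLEANUP() tests
used in this function are simple flag checks on the page's special space;
roughly as follows (see hash.h for the authoritative definitions):

    #define H_BUCKET_BEING_SPLIT(opaque) \
        (((opaque)->hasho_flag & LH_BUCKET_BEING_SPLIT) != 0)
    #define H_NEEDS_SPLIT_CLEANUP(opaque) \
        (((opaque)->hasho_flag & LH_BUCKET_NEEDS_SPLIT_CLEANUP) != 0)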
2003-09-05 00:06:27 +02:00
|
|
|
|
2007-04-19 22:24:04 +02:00
|
|
|
/*
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
* Clean up the tuples left behind by the previous split.  This operation
|
|
|
|
* requires a cleanup lock, and we already have one on the old bucket, so
|
|
|
|
* let's do it.  We also don't want to allow further splits from this
|
|
|
|
* bucket until the garbage of the previous split has been cleaned up.
|
|
|
|
* This has two advantages: first, it helps avoid bloat due to garbage;
|
|
|
|
* and second, during cleanup of a bucket we can always be sure that the
|
|
|
|
* garbage tuples belong to the most recently split bucket.  If, on the
|
|
|
|
* contrary, we allowed cleanup of a bucket after the meta page had been
|
|
|
|
* updated to indicate the new split but before the split was actually
|
|
|
|
* performed, the cleanup operation could not tell whether a tuple had
|
|
|
|
* been moved to the newly created bucket, and might delete such tuples.
|
|
|
|
*/
|
|
|
|
if (H_NEEDS_SPLIT_CLEANUP(oopaque))
|
|
|
|
{
|
2016-12-05 17:43:37 +01:00
|
|
|
/*
|
|
|
|
* Copy bucket mapping info now; refer to the comment in code below
|
2017-05-17 22:31:56 +02:00
|
|
|
* where we copy this information before calling _hash_splitbucket to
|
|
|
|
* see why this is okay.
|
2016-12-05 17:43:37 +01:00
|
|
|
*/
|
|
|
|
maxbucket = metap->hashm_maxbucket;
|
|
|
|
highmask = metap->hashm_highmask;
|
|
|
|
lowmask = metap->hashm_lowmask;
|
|
|
|
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
/* Release the metapage lock. */
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
|
|
|
|
hashbucketcleanup(rel, old_bucket, buf_oblkno, start_oblkno, NULL,
|
2016-12-05 17:43:37 +01:00
|
|
|
maxbucket, highmask, lowmask, NULL, NULL, true,
|
|
|
|
NULL, NULL);
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
|
|
|
|
_hash_dropbuf(rel, buf_oblkno);
|
|
|
|
|
|
|
|
goto restart_expand;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* There shouldn't be any active scan on the new bucket.
|
2007-04-19 22:24:04 +02:00
|
|
|
*
|
2007-11-15 22:14:46 +01:00
|
|
|
* Note: it is safe to compute the new bucket's blkno here, even though we
|
|
|
|
* may still need to update the BUCKET_TO_BLKNO mapping. This is because
|
|
|
|
* the current value of hashm_spares[hashm_ovflpoint] correctly shows
|
|
|
|
* where we are going to put a new splitpoint's worth of buckets.
|
2007-04-19 22:24:04 +02:00
|
|
|
*/
|
|
|
|
start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
|
|
|
|
|
|
|
|
/*
|
Expand hash indexes more gradually.
Since hash indexes typically have very few overflow pages, adding a
new splitpoint essentially doubles the on-disk size of the index,
which can lead to large and abrupt increases in disk usage (and
perhaps long delays on occasion). To mitigate this problem to some
degree, divide larger splitpoints into four equal phases. This means
that, for example, instead of growing from 4GB to 8GB all at once, a
hash index will now grow from 4GB to 5GB to 6GB to 7GB to 8GB, which
is perhaps still not as smooth as we'd like but certainly an
improvement.
This changes the on-disk format of the metapage, so bump HASH_VERSION
from 2 to 3. This will force a REINDEX of all existing hash indexes,
but that's probably a good idea anyway. First, hash indexes from
pre-10 versions of PostgreSQL could easily be corrupted, and we don't
want to confuse corruption carried over from an older release with any
corruption caused despite the new write-ahead logging in v10. Second,
it will let us remove some backward-compatibility code added by commit
293e24e507838733aba4748b514536af2d39d7f2.
Mithun Cy, reviewed by Amit Kapila, Jesper Pedersen and me. Regression
test outputs updated by me.
Discussion: http://postgr.es/m/CAD__OuhG6F1gQLCgMQNnMNgoCvOLQZz9zKYJQNYvYmmJoM42gA@mail.gmail.com
Discussion: http://postgr.es/m/CA+TgmoYty0jCf-pa+m+vYUJ716+AxM7nv_syvyanyf5O-L_i2A@mail.gmail.com
2017-04-04 05:46:33 +02:00
|
|
|
* If the split point is increasing we need to allocate a new batch of
|
|
|
|
* bucket pages.
|
2007-04-19 22:24:04 +02:00
|
|
|
*/
|
Expand hash indexes more gradually.
Since hash indexes typically have very few overflow pages, adding a
new splitpoint essentially doubles the on-disk size of the index,
which can lead to large and abrupt increases in disk usage (and
perhaps long delays on occasion). To mitigate this problem to some
degree, divide larger splitpoints into four equal phases. This means
that, for example, instead of growing from 4GB to 8GB all at once, a
hash index will now grow from 4GB to 5GB to 6GB to 7GB to 8GB, which
is perhaps still not as smooth as we'd like but certainly an
improvement.
This changes the on-disk format of the metapage, so bump HASH_VERSION
from 2 to 3. This will force a REINDEX of all existing hash indexes,
but that's probably a good idea anyway. First, hash indexes from
pre-10 versions of PostgreSQL could easily be corrupted, and we don't
want to confuse corruption carried over from an older release with any
corruption caused despite the new write-ahead logging in v10. Second,
it will let us remove some backward-compatibility code added by commit
293e24e507838733aba4748b514536af2d39d7f2.
Mithun Cy, reviewed by Amit Kapila, Jesper Pedersen and me. Regression
test outputs updated by me.
Discussion: http://postgr.es/m/CAD__OuhG6F1gQLCgMQNnMNgoCvOLQZz9zKYJQNYvYmmJoM42gA@mail.gmail.com
Discussion: http://postgr.es/m/CA+TgmoYty0jCf-pa+m+vYUJ716+AxM7nv_syvyanyf5O-L_i2A@mail.gmail.com
2017-04-04 05:46:33 +02:00
|
|
|
spare_ndx = _hash_spareindex(new_bucket + 1);
|
2007-04-19 22:24:04 +02:00
|
|
|
if (spare_ndx > metap->hashm_ovflpoint)
|
|
|
|
{
|
Expand hash indexes more gradually.
Since hash indexes typically have very few overflow pages, adding a
new splitpoint essentially doubles the on-disk size of the index,
which can lead to large and abrupt increases in disk usage (and
perhaps long delays on occasion). To mitigate this problem to some
degree, divide larger splitpoints into four equal phases. This means
that, for example, instead of growing from 4GB to 8GB all at once, a
hash index will now grow from 4GB to 5GB to 6GB to 7GB to 8GB, which
is perhaps still not as smooth as we'd like but certainly an
improvement.
This changes the on-disk format of the metapage, so bump HASH_VERSION
from 2 to 3. This will force a REINDEX of all existing hash indexes,
but that's probably a good idea anyway. First, hash indexes from
pre-10 versions of PostgreSQL could easily be corrupted, and we don't
want to confuse corruption carried over from an older release with any
corruption caused despite the new write-ahead logging in v10. Second,
it will let us remove some backward-compatibility code added by commit
293e24e507838733aba4748b514536af2d39d7f2.
Mithun Cy, reviewed by Amit Kapila, Jesper Pedersen and me. Regression
test outputs updated by me.
Discussion: http://postgr.es/m/CAD__OuhG6F1gQLCgMQNnMNgoCvOLQZz9zKYJQNYvYmmJoM42gA@mail.gmail.com
Discussion: http://postgr.es/m/CA+TgmoYty0jCf-pa+m+vYUJ716+AxM7nv_syvyanyf5O-L_i2A@mail.gmail.com
2017-04-04 05:46:33 +02:00
|
|
|
uint32 buckets_to_add;
|
|
|
|
|
2007-04-19 22:24:04 +02:00
|
|
|
Assert(spare_ndx == metap->hashm_ovflpoint + 1);
|
2007-11-15 22:14:46 +01:00
|
|
|
|
2007-04-19 22:24:04 +02:00
|
|
|
/*
|
Expand hash indexes more gradually.
Since hash indexes typically have very few overflow pages, adding a
new splitpoint essentially doubles the on-disk size of the index,
which can lead to large and abrupt increases in disk usage (and
perhaps long delays on occasion). To mitigate this problem to some
degree, divide larger splitpoints into four equal phases. This means
that, for example, instead of growing from 4GB to 8GB all at once, a
hash index will now grow from 4GB to 5GB to 6GB to 7GB to 8GB, which
is perhaps still not as smooth as we'd like but certainly an
improvement.
This changes the on-disk format of the metapage, so bump HASH_VERSION
from 2 to 3. This will force a REINDEX of all existing hash indexes,
but that's probably a good idea anyway. First, hash indexes from
pre-10 versions of PostgreSQL could easily be corrupted, and we don't
want to confuse corruption carried over from an older release with any
corruption caused despite the new write-ahead logging in v10. Second,
it will let us remove some backward-compatibility code added by commit
293e24e507838733aba4748b514536af2d39d7f2.
Mithun Cy, reviewed by Amit Kapila, Jesper Pedersen and me. Regression
test outputs updated by me.
Discussion: http://postgr.es/m/CAD__OuhG6F1gQLCgMQNnMNgoCvOLQZz9zKYJQNYvYmmJoM42gA@mail.gmail.com
Discussion: http://postgr.es/m/CA+TgmoYty0jCf-pa+m+vYUJ716+AxM7nv_syvyanyf5O-L_i2A@mail.gmail.com
2017-04-04 05:46:33 +02:00
|
|
|
* We treat allocation of buckets as a separate WAL-logged action.
|
|
|
|
* Even if we fail after this operation, we won't leak bucket pages;
|
|
|
|
* rather, the next split will consume this space. In any case, even
|
2017-05-17 22:31:56 +02:00
|
|
|
* without failure we don't use all the space in one split operation.
|
2007-04-19 22:24:04 +02:00
|
|
|
*/
|
Expand hash indexes more gradually.
Since hash indexes typically have very few overflow pages, adding a
new splitpoint essentially doubles the on-disk size of the index,
which can lead to large and abrupt increases in disk usage (and
perhaps long delays on occasion). To mitigate this problem to some
degree, divide larger splitpoints into four equal phases. This means
that, for example, instead of growing from 4GB to 8GB all at once, a
hash index will now grow from 4GB to 5GB to 6GB to 7GB to 8GB, which
is perhaps still not as smooth as we'd like but certainly an
improvement.
This changes the on-disk format of the metapage, so bump HASH_VERSION
from 2 to 3. This will force a REINDEX of all existing hash indexes,
but that's probably a good idea anyway. First, hash indexes from
pre-10 versions of PostgreSQL could easily be corrupted, and we don't
want to confuse corruption carried over from an older release with any
corruption caused despite the new write-ahead logging in v10. Second,
it will let us remove some backward-compatibility code added by commit
293e24e507838733aba4748b514536af2d39d7f2.
Mithun Cy, reviewed by Amit Kapila, Jesper Pedersen and me. Regression
test outputs updated by me.
Discussion: http://postgr.es/m/CAD__OuhG6F1gQLCgMQNnMNgoCvOLQZz9zKYJQNYvYmmJoM42gA@mail.gmail.com
Discussion: http://postgr.es/m/CA+TgmoYty0jCf-pa+m+vYUJ716+AxM7nv_syvyanyf5O-L_i2A@mail.gmail.com
2017-04-04 05:46:33 +02:00
|
|
|
buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
|
|
|
|
if (!_hash_alloc_buckets(rel, start_nblkno, buckets_to_add))
|
2007-04-19 22:24:04 +02:00
|
|
|
{
|
|
|
|
/* can't split due to BlockNumber overflow */
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
_hash_relbuf(rel, buf_oblkno);
|
2007-04-19 22:24:04 +02:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
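To make the phased allocation concrete, here is a hypothetical worked example
(the real phase boundaries come from _hash_spareindex() and
_hash_get_totalbuckets(), whose constants are not reproduced here):

    /*
     * Suppose the splitpoint group being entered would have added 1024
     * buckets all at once under the old scheme.  Divided into four equal
     * phases, each phase boundary allocates only a quarter of that:
     *
     *     buckets_to_add = _hash_get_totalbuckets(spare_ndx) - new_bucket;
     *                    = 256        (in this hypothetical example)
     */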
2015-03-30 22:40:05 +02:00
|
|
|
/*
|
|
|
|
* Physically allocate the new bucket's primary page. We want to do this
|
|
|
|
* before changing the metapage's mapping info, in case we can't get the
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
* disk space.  Ideally we wouldn't need to check for a cleanup lock on the
|
|
|
|
* new bucket, as no other backend can find this bucket until the meta page
|
|
|
|
* is updated; but it is good to be consistent with the old bucket locking.
|
2015-03-30 22:40:05 +02:00
|
|
|
*/
|
|
|
|
buf_nblkno = _hash_getnewbuf(rel, start_nblkno, MAIN_FORKNUM);
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
if (!IsBufferCleanupOK(buf_nblkno))
|
|
|
|
{
|
|
|
|
_hash_relbuf(rel, buf_oblkno);
|
|
|
|
_hash_relbuf(rel, buf_nblkno);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
/*
|
2017-03-01 10:13:38 +01:00
|
|
|
* Since we are scribbling on the pages in the shared buffers, establish a
|
|
|
|
* critical section. Any failure in this next code leaves us with a big
|
2005-11-22 19:17:34 +01:00
|
|
|
* problem: the metapage is effectively corrupt but could get written back
|
2017-03-14 18:27:02 +01:00
|
|
|
* to disk.
|
2003-09-05 00:06:27 +02:00
|
|
|
*/
|
2005-06-09 20:23:50 +02:00
|
|
|
START_CRIT_SECTION();
|
|
|
|
|
2017-03-01 10:13:38 +01:00
|
|
|
/*
|
|
|
|
* Okay to proceed with split. Update the metapage bucket mapping info.
|
|
|
|
*/
|
2003-09-05 00:06:27 +02:00
|
|
|
metap->hashm_maxbucket = new_bucket;
|
|
|
|
|
2003-09-01 22:26:34 +02:00
|
|
|
if (new_bucket > metap->hashm_highmask)
|
|
|
|
{
|
|
|
|
/* Starting a new doubling */
|
|
|
|
metap->hashm_lowmask = metap->hashm_highmask;
|
|
|
|
metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
|
2017-03-14 18:27:02 +01:00
|
|
|
metap_update_masks = true;
|
2003-09-01 22:26:34 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
|
|
|
/*
|
Expand hash indexes more gradually.
Since hash indexes typically have very few overflow pages, adding a
new splitpoint essentially doubles the on-disk size of the index,
which can lead to large and abrupt increases in disk usage (and
perhaps long delays on occasion). To mitigate this problem to some
degree, divide larger splitpoints into four equal phases. This means
that, for example, instead of growing from 4GB to 8GB all at once, a
hash index will now grow from 4GB to 5GB to 6GB to 7GB to 8GB, which
is perhaps still not as smooth as we'd like but certainly an
improvement.
This changes the on-disk format of the metapage, so bump HASH_VERSION
from 2 to 3. This will force a REINDEX of all existing hash indexes,
but that's probably a good idea anyway. First, hash indexes from
pre-10 versions of PostgreSQL could easily be corrupted, and we don't
want to confuse corruption carried over from an older release with any
corruption caused despite the new write-ahead logging in v10. Second,
it will let us remove some backward-compatibility code added by commit
293e24e507838733aba4748b514536af2d39d7f2.
Mithun Cy, reviewed by Amit Kapila, Jesper Pedersen and me. Regression
test outputs updated by me.
Discussion: http://postgr.es/m/CAD__OuhG6F1gQLCgMQNnMNgoCvOLQZz9zKYJQNYvYmmJoM42gA@mail.gmail.com
Discussion: http://postgr.es/m/CA+TgmoYty0jCf-pa+m+vYUJ716+AxM7nv_syvyanyf5O-L_i2A@mail.gmail.com
2017-04-04 05:46:33 +02:00
|
|
|
* If the split point is increasing we need to adjust the hashm_spares[]
|
|
|
|
* array and hashm_ovflpoint so that future overflow pages will be created
|
|
|
|
* beyond this new batch of bucket pages.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
if (spare_ndx > metap->hashm_ovflpoint)
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
Restructure index AM interface for index building and index tuple deletion,
per previous discussion on pghackers. Most of the duplicate code in
different AMs' ambuild routines has been moved out to a common routine
in index.c; this means that all index types now do the right things about
inserting recently-dead tuples, etc. (I also removed support for EXTEND
INDEX in the ambuild routines, since that's about to go away anyway, and
it cluttered the code a lot.) The retail indextuple deletion routines have
been replaced by a "bulk delete" routine in which the indexscan is inside
the access method. I haven't pushed this change as far as it should go yet,
but it should allow considerable simplification of the internal bookkeeping
for deletions. Also, add flag columns to pg_am to eliminate various
hardcoded tests on AM OIDs, and remove unused pg_am columns.
Fix rtree and gist index types to not attempt to store NULLs; before this,
gist usually crashed, while rtree managed not to crash but computed wacko
bounding boxes for NULL entries (which might have had something to do with
the performance problems we've heard about occasionally).
Add AtEOXact routines to hash, rtree, and gist, all of which have static
state that needs to be reset after an error. We discovered this need long
ago for btree, but missed the other guys.
Oh, one more thing: concurrent VACUUM is now the default.
2001-07-16 00:48:19 +02:00
|
|
|
metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
|
|
|
|
metap->hashm_ovflpoint = spare_ndx;
|
2017-03-14 18:27:02 +01:00
|
|
|
metap_update_splitpoint = true;
|
1997-09-07 07:04:48 +02:00
|
|
|
}
|
|
|
|
|
2017-03-01 10:13:38 +01:00
|
|
|
MarkBufferDirty(metabuf);
|
2005-06-09 20:23:50 +02:00
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
/*
|
|
|
|
* Copy bucket mapping info now; this saves re-accessing the meta page
|
|
|
|
* inside _hash_splitbucket's inner loop. Note that once we drop the
|
2005-10-15 04:49:52 +02:00
|
|
|
* split lock, other splits could begin, so these values might be out of
|
2014-05-06 18:12:18 +02:00
|
|
|
* date before _hash_splitbucket finishes. That's okay, since all it
|
2005-10-15 04:49:52 +02:00
|
|
|
* needs is to tell which of these two buckets to map hashkeys into.
|
2003-09-05 00:06:27 +02:00
|
|
|
*/
|
|
|
|
maxbucket = metap->hashm_maxbucket;
|
|
|
|
highmask = metap->hashm_highmask;
|
|
|
|
lowmask = metap->hashm_lowmask;
|
|
|
|
|
2017-03-01 10:13:38 +01:00
|
|
|
opage = BufferGetPage(buf_oblkno);
|
|
|
|
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark the old bucket to indicate that split is in progress. (At
|
2017-05-17 22:31:56 +02:00
|
|
|
* operation end, we will clear the split-in-progress flag.) Also, for a
|
|
|
|
* primary bucket page, hasho_prevblkno stores the number of buckets that
|
|
|
|
* existed as of the last split, so we must update that value here.
|
2017-03-01 10:13:38 +01:00
|
|
|
*/
|
|
|
|
oopaque->hasho_flag |= LH_BUCKET_BEING_SPLIT;
|
|
|
|
oopaque->hasho_prevblkno = maxbucket;
|
|
|
|
|
|
|
|
MarkBufferDirty(buf_oblkno);
|
|
|
|
|
|
|
|
npage = BufferGetPage(buf_nblkno);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* initialize the new bucket's primary page and mark it to indicate that
|
|
|
|
* split is in progress.
|
|
|
|
*/
|
|
|
|
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
|
|
|
|
nopaque->hasho_prevblkno = maxbucket;
|
|
|
|
nopaque->hasho_nextblkno = InvalidBlockNumber;
|
|
|
|
nopaque->hasho_bucket = new_bucket;
|
|
|
|
nopaque->hasho_flag = LH_BUCKET_PAGE | LH_BUCKET_BEING_POPULATED;
|
|
|
|
nopaque->hasho_page_id = HASHO_PAGE_ID;
|
|
|
|
|
|
|
|
MarkBufferDirty(buf_nblkno);
|
|
|
|
|
2017-03-14 18:27:02 +01:00
|
|
|
/* XLOG stuff */
|
|
|
|
if (RelationNeedsWAL(rel))
|
|
|
|
{
|
|
|
|
xl_hash_split_allocate_page xlrec;
|
|
|
|
XLogRecPtr recptr;
|
|
|
|
|
|
|
|
xlrec.new_bucket = maxbucket;
|
|
|
|
xlrec.old_bucket_flag = oopaque->hasho_flag;
|
|
|
|
xlrec.new_bucket_flag = nopaque->hasho_flag;
|
|
|
|
xlrec.flags = 0;
|
|
|
|
|
|
|
|
XLogBeginInsert();
|
|
|
|
|
|
|
|
XLogRegisterBuffer(0, buf_oblkno, REGBUF_STANDARD);
|
|
|
|
XLogRegisterBuffer(1, buf_nblkno, REGBUF_WILL_INIT);
|
|
|
|
XLogRegisterBuffer(2, metabuf, REGBUF_STANDARD);
|
|
|
|
|
|
|
|
if (metap_update_masks)
|
|
|
|
{
|
|
|
|
xlrec.flags |= XLH_SPLIT_META_UPDATE_MASKS;
|
|
|
|
XLogRegisterBufData(2, (char *) &metap->hashm_lowmask, sizeof(uint32));
|
|
|
|
XLogRegisterBufData(2, (char *) &metap->hashm_highmask, sizeof(uint32));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (metap_update_splitpoint)
|
|
|
|
{
|
|
|
|
xlrec.flags |= XLH_SPLIT_META_UPDATE_SPLITPOINT;
|
|
|
|
XLogRegisterBufData(2, (char *) &metap->hashm_ovflpoint,
|
|
|
|
sizeof(uint32));
|
|
|
|
XLogRegisterBufData(2,
|
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
2017-06-21 21:35:54 +02:00
|
|
|
(char *) &metap->hashm_spares[metap->hashm_ovflpoint],
|
2017-03-14 18:27:02 +01:00
|
|
|
sizeof(uint32));
|
|
|
|
}
|
|
|
|
|
|
|
|
XLogRegisterData((char *) &xlrec, SizeOfHashSplitAllocPage);
|
|
|
|
|
|
|
|
recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_ALLOCATE_PAGE);
|
|
|
|
|
|
|
|
PageSetLSN(BufferGetPage(buf_oblkno), recptr);
|
|
|
|
PageSetLSN(BufferGetPage(buf_nblkno), recptr);
|
|
|
|
PageSetLSN(BufferGetPage(metabuf), recptr);
|
|
|
|
}
|
|
|
|
|
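On replay, the matching redo routine (hash_xlog_split_allocate_page() in
hash_xlog.c) reapplies these changes buffer by buffer.  A heavily abridged,
illustrative sketch only; the real routine also deals with full-page images
and the order in which buffers are released:

    /* sketch of the redo side of XLOG_HASH_SPLIT_ALLOCATE_PAGE */
    if (XLogReadBufferForRedo(record, 0, &oldbuf) == BLK_NEEDS_REDO)
    {
        /* re-mark the old primary bucket page as being split */
    }
    newbuf = XLogInitBufferForRedo(record, 1);
    /* re-initialize the new bucket's primary page, as done above */
    if (XLogReadBufferForRedo(record, 2, &metabuf) == BLK_NEEDS_REDO)
    {
        /* re-apply the maxbucket, mask, and ovflpoint updates from WAL data */
    }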
2017-03-01 10:13:38 +01:00
|
|
|
END_CRIT_SECTION();
|
|
|
|
|
|
|
|
/* drop lock, but keep pin */
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
|
2003-09-05 00:06:27 +02:00
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/* Relocate records to the new bucket */
|
2015-04-03 22:49:11 +02:00
|
|
|
_hash_splitbucket(rel, metabuf,
|
2015-03-30 22:40:05 +02:00
|
|
|
old_bucket, new_bucket,
|
2017-03-01 10:13:38 +01:00
|
|
|
buf_oblkno, buf_nblkno, NULL,
|
2003-09-05 00:06:27 +02:00
|
|
|
maxbucket, highmask, lowmask);
|
|
|
|
|
2017-08-05 01:33:01 +02:00
|
|
|
/* all done, now release the pins on primary buckets. */
|
|
|
|
_hash_dropbuf(rel, buf_oblkno);
|
|
|
|
_hash_dropbuf(rel, buf_nblkno);
|
2017-03-01 10:13:38 +01:00
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* Here if we decide not to split or fail to acquire the old bucket lock */
|
|
|
|
fail:
|
|
|
|
|
|
|
|
/* We didn't write the metapage, so just drop lock */
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-11-19 22:33:23 +01:00
|
|
|
/*
|
|
|
|
* _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
|
|
|
|
*
|
|
|
|
* This does not need to initialize the new bucket pages; we'll do that as
|
|
|
|
* each one is used by _hash_expandtable(). But we have to extend the logical
|
2007-01-03 19:11:01 +01:00
|
|
|
* EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
|
2007-04-19 22:24:04 +02:00
|
|
|
* sync with ours, so that we don't get complaints from smgr.
|
2006-11-19 22:33:23 +01:00
|
|
|
*
|
|
|
|
* We do this by writing a page of zeroes at the end of the splitpoint range.
|
|
|
|
* We expect that the filesystem will ensure that the intervening pages read
|
|
|
|
* as zeroes too. On many filesystems this "hole" will not be allocated
|
|
|
|
* immediately, which means that the index file may end up more fragmented
|
|
|
|
* than if we forced it all to be allocated now; but since we don't scan
|
|
|
|
* hash indexes sequentially anyway, that probably doesn't matter.
|
|
|
|
*
|
|
|
|
* XXX It's annoying that this code is executed with the metapage lock held.
|
2017-03-15 12:21:17 +01:00
|
|
|
* We need to interlock against _hash_addovflpage() adding a new overflow page
|
2006-11-19 22:33:23 +01:00
|
|
|
* concurrently, but it'd likely be better to use LockRelationForExtension
|
|
|
|
* for the purpose. OTOH, adding a splitpoint is a very infrequent operation,
|
|
|
|
* so it may not be worth worrying about.
|
|
|
|
*
|
2017-08-16 06:22:32 +02:00
|
|
|
* Returns true if successful, or false if allocation failed due to
|
2007-04-19 22:24:04 +02:00
|
|
|
* BlockNumber overflow.
|
2006-11-19 22:33:23 +01:00
|
|
|
*/
|
2007-04-19 22:24:04 +02:00
|
|
|
static bool
|
|
|
|
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
|
2006-11-19 22:33:23 +01:00
|
|
|
{
|
2007-11-15 22:14:46 +01:00
|
|
|
BlockNumber lastblock;
|
2018-09-01 21:27:12 +02:00
|
|
|
PGAlignedBlock zerobuf;
|
2017-03-14 18:27:02 +01:00
|
|
|
Page page;
|
2017-04-05 20:17:23 +02:00
|
|
|
HashPageOpaque ovflopaque;
|
2006-11-19 22:33:23 +01:00
|
|
|
|
|
|
|
lastblock = firstblock + nblocks - 1;
|
|
|
|
|
|
|
|
/*
|
2007-11-15 22:14:46 +01:00
|
|
|
* Check for overflow in block number calculation; if so, we cannot extend
|
|
|
|
* the index anymore.
|
2006-11-19 22:33:23 +01:00
|
|
|
*/
|
|
|
|
if (lastblock < firstblock || lastblock == InvalidBlockNumber)
|
2007-04-19 22:24:04 +02:00
|
|
|
return false;
|
2006-11-19 22:33:23 +01:00
|
|
|
|
2018-09-01 21:27:12 +02:00
|
|
|
page = (Page) zerobuf.data;
|
2017-03-14 18:27:02 +01:00
|
|
|
|
|
|
|
/*
|
2017-03-27 04:15:50 +02:00
|
|
|
* Initialize the page. Just zeroing the page won't work; see
|
2017-05-17 22:31:56 +02:00
|
|
|
* _hash_freeovflpage for similar usage. We take care to make the special
|
|
|
|
* space valid for the benefit of tools such as pageinspect.
|
2017-03-14 18:27:02 +01:00
|
|
|
*/
|
|
|
|
_hash_pageinit(page, BLCKSZ);
|
|
|
|
|
2017-04-05 20:17:23 +02:00
|
|
|
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(page);
|
|
|
|
|
|
|
|
ovflopaque->hasho_prevblkno = InvalidBlockNumber;
|
|
|
|
ovflopaque->hasho_nextblkno = InvalidBlockNumber;
|
|
|
|
ovflopaque->hasho_bucket = -1;
|
|
|
|
ovflopaque->hasho_flag = LH_UNUSED_PAGE;
|
|
|
|
ovflopaque->hasho_page_id = HASHO_PAGE_ID;
|
|
|
|
|
2017-03-14 18:27:02 +01:00
|
|
|
if (RelationNeedsWAL(rel))
|
|
|
|
log_newpage(&rel->rd_node,
|
|
|
|
MAIN_FORKNUM,
|
|
|
|
lastblock,
|
2018-09-01 21:27:12 +02:00
|
|
|
zerobuf.data,
|
2017-03-14 18:27:02 +01:00
|
|
|
true);
|
2006-11-19 22:33:23 +01:00
|
|
|
|
2007-04-19 22:24:04 +02:00
|
|
|
RelationOpenSmgr(rel);
|
2018-09-04 04:43:35 +02:00
|
|
|
PageSetChecksumInplace(page, lastblock);
|
2018-09-01 21:27:12 +02:00
|
|
|
smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf.data, false);
|
2006-11-19 22:33:23 +01:00
|
|
|
|
2007-04-19 22:24:04 +02:00
|
|
|
return true;
|
2006-11-19 22:33:23 +01:00
|
|
|
}
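
As an aside, the "write only the last page and let the intervening range become a filesystem hole" technique described in the comment above can be illustrated outside the smgr layer. The following standalone sketch is not part of hashpage.c; the block size, function name, and error handling are hypothetical. It shows the same idea with plain POSIX calls: seek past the blocks we never touch and physically write a single zeroed block at the new end of the file.

/*
 * Illustrative sketch only, not part of hashpage.c: extend a file by
 * writing a single zeroed block at its new end, relying on the filesystem
 * to treat the skipped-over range as a hole that reads back as zeroes.
 * SKETCH_BLCKSZ and extend_with_hole are hypothetical names.
 */
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define SKETCH_BLCKSZ 8192

static int
extend_with_hole(int fd, unsigned long lastblock)
{
	char		zerobuf[SKETCH_BLCKSZ];
	off_t		target = (off_t) lastblock * SKETCH_BLCKSZ;

	memset(zerobuf, 0, sizeof(zerobuf));

	/* Seek past the intervening blocks; they become a sparse hole. */
	if (lseek(fd, target, SEEK_SET) < 0)
		return -1;

	/* Physically allocate only the final block of the new range. */
	if (write(fd, zerobuf, sizeof(zerobuf)) != (ssize_t) sizeof(zerobuf))
		return -1;

	return 0;
}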
|
|
|
|
|
|
|
|
|
1996-07-09 08:22:35 +02:00
|
|
|
/*
|
2003-09-01 22:26:34 +02:00
|
|
|
* _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
|
1996-07-09 08:22:35 +02:00
|
|
|
*
|
2017-03-01 10:13:38 +01:00
|
|
|
* This routine is used to partition the tuples between the old and new buckets and
|
|
|
|
* also to finish incomplete split operations. To finish a previously
|
|
|
|
* interrupted split operation, the caller needs to fill htab. If htab is set,
|
|
|
|
* then we skip the movement of tuples that exist in htab; otherwise, a NULL
|
|
|
|
* value of htab indicates movement of all the tuples that belong to the new
|
|
|
|
* bucket.
|
|
|
|
*
|
2003-09-01 22:26:34 +02:00
|
|
|
* We are splitting a bucket that consists of a base bucket page and zero
|
|
|
|
* or more overflow (bucket chain) pages. We must relocate tuples that
|
2017-03-01 10:13:38 +01:00
|
|
|
* belong in the new bucket.
|
2003-09-05 00:06:27 +02:00
|
|
|
*
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
* The caller must hold cleanup locks on both buckets to ensure that
|
2003-09-05 00:06:27 +02:00
|
|
|
* no one else is trying to access them (see README).
|
|
|
|
*
|
|
|
|
* The caller must hold a pin, but no lock, on the metapage buffer.
|
|
|
|
* The buffer is returned in the same state. (The metapage is only
|
|
|
|
* touched if it becomes necessary to add or remove overflow pages.)
|
2015-03-30 22:40:05 +02:00
|
|
|
*
|
2016-11-30 21:39:21 +01:00
|
|
|
* The split needs to retain pins on the primary bucket pages of both old and new
|
|
|
|
* buckets until the end of the operation. This is to prevent vacuum from starting
|
|
|
|
* while a split is in progress.
|
|
|
|
*
|
2015-03-30 22:40:05 +02:00
|
|
|
* In addition, the caller must have created the new bucket's base page,
|
2017-08-05 01:33:01 +02:00
|
|
|
* which is passed in buffer nbuf, pinned and write-locked. The lock will be
|
|
|
|
* released here and the pin must be released by the caller. (The API is set up
|
|
|
|
* this way because we must do _hash_getnewbuf() before releasing the metapage
|
|
|
|
* write lock. So instead of passing the new bucket's start block number, we
|
|
|
|
* pass an actual buffer.)
|
1996-07-09 08:22:35 +02:00
|
|
|
*/
|
|
|
|
static void
|
2003-09-01 22:26:34 +02:00
|
|
|
_hash_splitbucket(Relation rel,
|
|
|
|
Buffer metabuf,
|
|
|
|
Bucket obucket,
|
2003-09-05 00:06:27 +02:00
|
|
|
Bucket nbucket,
|
2016-11-30 21:39:21 +01:00
|
|
|
Buffer obuf,
|
2015-04-03 22:49:11 +02:00
|
|
|
Buffer nbuf,
|
2017-03-01 10:13:38 +01:00
|
|
|
HTAB *htab,
|
2003-09-05 00:06:27 +02:00
|
|
|
uint32 maxbucket,
|
|
|
|
uint32 highmask,
|
|
|
|
uint32 lowmask)
|
2016-11-30 21:39:21 +01:00
|
|
|
{
|
|
|
|
Buffer bucket_obuf;
|
|
|
|
Buffer bucket_nbuf;
|
|
|
|
Page opage;
|
|
|
|
Page npage;
|
|
|
|
HashPageOpaque oopaque;
|
|
|
|
HashPageOpaque nopaque;
|
2017-03-14 18:27:02 +01:00
|
|
|
OffsetNumber itup_offsets[MaxIndexTuplesPerPage];
|
|
|
|
IndexTuple itups[MaxIndexTuplesPerPage];
|
|
|
|
Size all_tups_size = 0;
|
|
|
|
int i;
|
|
|
|
uint16 nitups = 0;
|
2016-11-30 21:39:21 +01:00
|
|
|
|
|
|
|
bucket_obuf = obuf;
|
|
|
|
opage = BufferGetPage(obuf);
|
|
|
|
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
|
|
|
|
|
|
|
|
bucket_nbuf = nbuf;
|
|
|
|
npage = BufferGetPage(nbuf);
|
|
|
|
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
|
|
|
|
|
2018-04-07 15:59:14 +02:00
|
|
|
/* Copy the predicate locks from old bucket to new bucket. */
|
|
|
|
PredicateLockPageSplit(rel,
|
|
|
|
BufferGetBlockNumber(bucket_obuf),
|
|
|
|
BufferGetBlockNumber(bucket_nbuf));
|
|
|
|
|
1997-09-07 07:04:48 +02:00
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* Partition the tuples in the old bucket between the old bucket and the
|
|
|
|
* new bucket, advancing along the old bucket's overflow bucket chain and
|
2010-02-26 03:01:40 +01:00
|
|
|
* adding overflow pages to the new bucket as needed. The outer loop iterates
|
|
|
|
* once per page in the old bucket.
|
1997-09-07 07:04:48 +02:00
|
|
|
*/
|
|
|
|
for (;;)
|
|
|
|
{
|
2015-04-03 22:49:11 +02:00
|
|
|
BlockNumber oblkno;
|
2009-11-01 22:25:25 +01:00
|
|
|
OffsetNumber ooffnum;
|
|
|
|
OffsetNumber omaxoffnum;
|
|
|
|
|
|
|
|
/* Scan each tuple in old page */
|
|
|
|
omaxoffnum = PageGetMaxOffsetNumber(opage);
|
|
|
|
for (ooffnum = FirstOffsetNumber;
|
|
|
|
ooffnum <= omaxoffnum;
|
|
|
|
ooffnum = OffsetNumberNext(ooffnum))
|
1997-09-07 07:04:48 +02:00
|
|
|
{
|
2009-11-01 22:25:25 +01:00
|
|
|
IndexTuple itup;
|
|
|
|
Size itemsz;
|
|
|
|
Bucket bucket;
|
2016-11-30 21:39:21 +01:00
|
|
|
bool found = false;
|
2004-08-29 07:07:03 +02:00
|
|
|
|
2016-11-08 16:47:52 +01:00
|
|
|
/* skip dead tuples */
|
|
|
|
if (ItemIdIsDead(PageGetItemId(opage, ooffnum)))
|
|
|
|
continue;
|
|
|
|
|
2003-09-05 00:06:27 +02:00
|
|
|
/*
|
2016-11-30 21:39:21 +01:00
|
|
|
* Before inserting a tuple, probe the hash table containing TIDs
|
|
|
|
* of tuples belonging to the new bucket. If we find a match, then
|
|
|
|
* skip that tuple; otherwise, fetch the item's hash key (conveniently
|
|
|
|
* stored in the item) and determine which bucket it now belongs
|
|
|
|
* in.
|
2003-09-05 00:06:27 +02:00
|
|
|
*/
|
2009-11-01 22:25:25 +01:00
|
|
|
itup = (IndexTuple) PageGetItem(opage,
|
|
|
|
PageGetItemId(opage, ooffnum));
|
2016-11-30 21:39:21 +01:00
|
|
|
|
|
|
|
if (htab)
|
|
|
|
(void) hash_search(htab, &itup->t_tid, HASH_FIND, &found);
|
|
|
|
|
|
|
|
if (found)
|
|
|
|
continue;
|
|
|
|
|
2009-11-01 22:25:25 +01:00
|
|
|
bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
|
|
|
|
maxbucket, highmask, lowmask);
|
2003-09-05 00:06:27 +02:00
|
|
|
|
2009-11-01 22:25:25 +01:00
|
|
|
if (bucket == nbucket)
|
|
|
|
{
|
2016-11-30 21:39:21 +01:00
|
|
|
IndexTuple new_itup;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a copy of the index tuple, as we have to scribble on it.
|
|
|
|
*/
|
|
|
|
new_itup = CopyIndexTuple(itup);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark the index tuple as moved by split; such tuples are
|
|
|
|
* skipped by scans if there is a split in progress for the bucket.
|
|
|
|
*/
|
|
|
|
new_itup->t_info |= INDEX_MOVED_BY_SPLIT_MASK;
|
|
|
|
|
2009-11-01 22:25:25 +01:00
|
|
|
/*
|
|
|
|
* Insert the tuple into the new bucket. If it doesn't fit on
|
|
|
|
* the current page in the new bucket, we must allocate a new
|
|
|
|
* overflow page and place the tuple on that page instead.
|
|
|
|
*/
|
2018-03-01 01:25:54 +01:00
|
|
|
itemsz = IndexTupleSize(new_itup);
|
2009-11-01 22:25:25 +01:00
|
|
|
itemsz = MAXALIGN(itemsz);
|
|
|
|
|
2017-03-14 18:27:02 +01:00
|
|
|
if (PageGetFreeSpaceForMultipleTuples(npage, nitups + 1) < (all_tups_size + itemsz))
|
2009-11-01 22:25:25 +01:00
|
|
|
{
|
2017-03-14 18:27:02 +01:00
|
|
|
/*
|
|
|
|
* Change the shared buffer state in a critical section;
|
|
|
|
* otherwise any error could make it unrecoverable.
|
|
|
|
*/
|
|
|
|
START_CRIT_SECTION();
|
|
|
|
|
|
|
|
_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
|
2016-12-23 13:14:37 +01:00
|
|
|
MarkBufferDirty(nbuf);
|
2017-03-14 18:27:02 +01:00
|
|
|
/* log the split operation before releasing the lock */
|
|
|
|
log_split_page(rel, nbuf);
|
|
|
|
|
|
|
|
END_CRIT_SECTION();
|
|
|
|
|
2017-03-01 10:13:38 +01:00
|
|
|
/* drop lock, but keep pin */
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
|
2017-03-14 18:27:02 +01:00
|
|
|
|
|
|
|
/* be tidy */
|
|
|
|
for (i = 0; i < nitups; i++)
|
|
|
|
pfree(itups[i]);
|
|
|
|
nitups = 0;
|
|
|
|
all_tups_size = 0;
|
|
|
|
|
2009-11-01 22:25:25 +01:00
|
|
|
/* chain to a new overflow page */
|
2016-11-30 21:39:21 +01:00
|
|
|
nbuf = _hash_addovflpage(rel, metabuf, nbuf, (nbuf == bucket_nbuf) ? true : false);
|
2016-04-20 15:31:19 +02:00
|
|
|
npage = BufferGetPage(nbuf);
|
2016-11-30 21:39:21 +01:00
|
|
|
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
|
2009-11-01 22:25:25 +01:00
|
|
|
}
|
|
|
|
|
2017-03-14 18:27:02 +01:00
|
|
|
itups[nitups++] = new_itup;
|
|
|
|
all_tups_size += itemsz;
|
2009-11-01 22:25:25 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* the tuple stays on this page, so nothing to do.
|
|
|
|
*/
|
|
|
|
Assert(bucket == obucket);
|
|
|
|
}
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
1997-09-07 07:04:48 +02:00
|
|
|
|
2009-11-01 22:25:25 +01:00
|
|
|
oblkno = oopaque->hasho_nextblkno;
|
|
|
|
|
2016-11-30 21:39:21 +01:00
|
|
|
/* retain the pin on the old primary bucket */
|
|
|
|
if (obuf == bucket_obuf)
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
|
1997-09-07 07:04:48 +02:00
|
|
|
else
|
2009-11-01 22:25:25 +01:00
|
|
|
_hash_relbuf(rel, obuf);
|
|
|
|
|
|
|
|
/* Exit loop if no more overflow pages in old bucket */
|
|
|
|
if (!BlockNumberIsValid(oblkno))
|
2017-03-01 10:13:38 +01:00
|
|
|
{
|
2017-03-14 18:27:02 +01:00
|
|
|
/*
|
|
|
|
* Change the shared buffer state in a critical section; otherwise
|
|
|
|
* any error could make it unrecoverable.
|
|
|
|
*/
|
|
|
|
START_CRIT_SECTION();
|
|
|
|
|
|
|
|
_hash_pgaddmultitup(rel, nbuf, itups, itup_offsets, nitups);
|
2017-03-01 10:13:38 +01:00
|
|
|
MarkBufferDirty(nbuf);
|
2017-03-14 18:27:02 +01:00
|
|
|
/* log the split operation before releasing the lock */
|
|
|
|
log_split_page(rel, nbuf);
|
|
|
|
|
|
|
|
END_CRIT_SECTION();
|
|
|
|
|
2017-03-01 10:13:38 +01:00
|
|
|
if (nbuf == bucket_nbuf)
|
|
|
|
LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
|
|
|
|
else
|
|
|
|
_hash_relbuf(rel, nbuf);
|
2017-03-14 18:27:02 +01:00
|
|
|
|
|
|
|
/* be tidy */
|
|
|
|
for (i = 0; i < nitups; i++)
|
|
|
|
pfree(itups[i]);
|
2009-11-01 22:25:25 +01:00
|
|
|
break;
|
2017-03-01 10:13:38 +01:00
|
|
|
}
|
2009-11-01 22:25:25 +01:00
|
|
|
|
|
|
|
/* Else, advance to next old page */
|
2016-11-30 21:39:21 +01:00
|
|
|
obuf = _hash_getbuf(rel, oblkno, HASH_READ, LH_OVERFLOW_PAGE);
|
2016-04-20 15:31:19 +02:00
|
|
|
opage = BufferGetPage(obuf);
|
2009-11-01 22:25:25 +01:00
|
|
|
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
|
2003-09-05 00:06:27 +02:00
|
|
|
|
|
|
|
/*
|
2005-10-15 04:49:52 +02:00
|
|
|
* We're at the end of the old bucket chain, so we're done partitioning
|
2016-11-30 21:39:21 +01:00
|
|
|
* the tuples. Mark the old and new buckets to indicate the split is
|
|
|
|
* finished.
|
|
|
|
*
|
|
|
|
* To avoid deadlocks due to locking order of buckets, first lock the old
|
|
|
|
* bucket and then the new bucket.
|
2003-09-05 00:06:27 +02:00
|
|
|
*/
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(bucket_obuf, BUFFER_LOCK_EXCLUSIVE);
|
2016-11-30 21:39:21 +01:00
|
|
|
opage = BufferGetPage(bucket_obuf);
|
|
|
|
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
|
|
|
|
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(bucket_nbuf, BUFFER_LOCK_EXCLUSIVE);
|
2016-11-30 21:39:21 +01:00
|
|
|
npage = BufferGetPage(bucket_nbuf);
|
|
|
|
nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
|
|
|
|
|
2017-03-14 18:27:02 +01:00
|
|
|
START_CRIT_SECTION();
|
|
|
|
|
2016-11-30 21:39:21 +01:00
|
|
|
oopaque->hasho_flag &= ~LH_BUCKET_BEING_SPLIT;
|
|
|
|
nopaque->hasho_flag &= ~LH_BUCKET_BEING_POPULATED;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* After the split is finished, mark the old bucket to indicate that it
|
2017-08-05 01:33:01 +02:00
|
|
|
* contains deletable tuples. We will clear the split-cleanup flag after
|
|
|
|
* deleting such tuples, either at the end of this split, at the next split
|
|
|
|
* from the old bucket, or at the time of vacuum.
|
2016-11-30 21:39:21 +01:00
|
|
|
*/
|
|
|
|
oopaque->hasho_flag |= LH_BUCKET_NEEDS_SPLIT_CLEANUP;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now write the buffers; we don't release the locks here, as the caller is
|
|
|
|
* responsible for releasing them.
|
|
|
|
*/
|
|
|
|
MarkBufferDirty(bucket_obuf);
|
|
|
|
MarkBufferDirty(bucket_nbuf);
|
2017-03-14 18:27:02 +01:00
|
|
|
|
|
|
|
if (RelationNeedsWAL(rel))
|
|
|
|
{
|
|
|
|
XLogRecPtr recptr;
|
|
|
|
xl_hash_split_complete xlrec;
|
|
|
|
|
|
|
|
xlrec.old_bucket_flag = oopaque->hasho_flag;
|
|
|
|
xlrec.new_bucket_flag = nopaque->hasho_flag;
|
|
|
|
|
|
|
|
XLogBeginInsert();
|
|
|
|
|
|
|
|
XLogRegisterData((char *) &xlrec, SizeOfHashSplitComplete);
|
|
|
|
|
|
|
|
XLogRegisterBuffer(0, bucket_obuf, REGBUF_STANDARD);
|
|
|
|
XLogRegisterBuffer(1, bucket_nbuf, REGBUF_STANDARD);
|
|
|
|
|
|
|
|
recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_COMPLETE);
|
|
|
|
|
|
|
|
PageSetLSN(BufferGetPage(bucket_obuf), recptr);
|
|
|
|
PageSetLSN(BufferGetPage(bucket_nbuf), recptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
END_CRIT_SECTION();
|
2017-08-05 01:33:01 +02:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If possible, clean up the old bucket. We might not be able to do this
|
|
|
|
* if someone else has a pin on it, but if not then we can go ahead. This
|
2017-08-14 23:29:33 +02:00
|
|
|
* isn't absolutely necessary, but it reduces bloat; if we don't do it
|
|
|
|
* now, VACUUM will do it eventually, but maybe not until new overflow
|
|
|
|
* pages have been allocated. Note that there's no need to clean up the
|
|
|
|
* new bucket.
|
2017-08-05 01:33:01 +02:00
|
|
|
*/
|
|
|
|
if (IsBufferCleanupOK(bucket_obuf))
|
|
|
|
{
|
|
|
|
LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
|
|
|
|
hashbucketcleanup(rel, obucket, bucket_obuf,
|
|
|
|
BufferGetBlockNumber(bucket_obuf), NULL,
|
|
|
|
maxbucket, highmask, lowmask, NULL, NULL, true,
|
|
|
|
NULL, NULL);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
LockBuffer(bucket_nbuf, BUFFER_LOCK_UNLOCK);
|
|
|
|
LockBuffer(bucket_obuf, BUFFER_LOCK_UNLOCK);
|
|
|
|
}
|
2016-11-30 21:39:21 +01:00
|
|
|
}
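
The partitioning loop above hinges on mapping each tuple's hash key to either the old or the new bucket using the maxbucket/highmask/lowmask parameters. The following hedged sketch illustrates how such a linear-hashing mapping can work; it mirrors the general technique rather than quoting the actual helper in hashutil.c, and the function name is hypothetical.

/*
 * Illustrative sketch only: map a hash key to a bucket number given the
 * maxbucket/highmask/lowmask parameters passed to _hash_splitbucket.
 * Assumes the masks are of the form 2^k - 1, with highmask covering the
 * current splitpoint and lowmask the previous one.
 */
#include <stdint.h>

static uint32_t
sketch_hashkey2bucket(uint32_t hashkey, uint32_t maxbucket,
					  uint32_t highmask, uint32_t lowmask)
{
	uint32_t	bucket;

	/* Try the wider mask for the current splitpoint first. */
	bucket = hashkey & highmask;

	/* If that points past the highest bucket created so far, fall back. */
	if (bucket > maxbucket)
		bucket = bucket & lowmask;

	return bucket;
}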
|
|
|
|
|
|
|
|
/*
|
|
|
|
* _hash_finish_split() -- Finish the previously interrupted split operation
|
|
|
|
*
|
|
|
|
* To complete the split operation, we build a hash table of the TIDs in the new
|
|
|
|
* bucket, which the split operation then uses to skip tuples that were
|
|
|
|
* already moved before the split was previously interrupted.
|
|
|
|
*
|
|
|
|
* The caller must hold a pin, but no lock, on the metapage and old bucket's
|
2017-02-06 10:33:58 +01:00
|
|
|
* primary page buffer. The buffers are returned in the same state. (The
|
2016-11-30 21:39:21 +01:00
|
|
|
* metapage is only touched if it becomes necessary to add or remove overflow
|
|
|
|
* pages.)
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
_hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
|
|
|
|
uint32 maxbucket, uint32 highmask, uint32 lowmask)
|
|
|
|
{
|
|
|
|
HASHCTL hash_ctl;
|
|
|
|
HTAB *tidhtab;
|
|
|
|
Buffer bucket_nbuf = InvalidBuffer;
|
|
|
|
Buffer nbuf;
|
|
|
|
Page npage;
|
|
|
|
BlockNumber nblkno;
|
|
|
|
BlockNumber bucket_nblkno;
|
|
|
|
HashPageOpaque npageopaque;
|
|
|
|
Bucket nbucket;
|
|
|
|
bool found;
|
|
|
|
|
|
|
|
/* Initialize hash tables used to track TIDs */
|
|
|
|
memset(&hash_ctl, 0, sizeof(hash_ctl));
|
|
|
|
hash_ctl.keysize = sizeof(ItemPointerData);
|
|
|
|
hash_ctl.entrysize = sizeof(ItemPointerData);
|
|
|
|
hash_ctl.hcxt = CurrentMemoryContext;
|
|
|
|
|
|
|
|
tidhtab =
|
|
|
|
hash_create("bucket ctids",
|
|
|
|
256, /* arbitrary initial size */
|
|
|
|
&hash_ctl,
|
|
|
|
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
|
|
|
|
|
|
|
|
bucket_nblkno = nblkno = _hash_get_newblock_from_oldbucket(rel, obucket);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Scan the new bucket and build a hash table of its TIDs
|
|
|
|
*/
|
|
|
|
for (;;)
|
|
|
|
{
|
|
|
|
OffsetNumber noffnum;
|
|
|
|
OffsetNumber nmaxoffnum;
|
|
|
|
|
|
|
|
nbuf = _hash_getbuf(rel, nblkno, HASH_READ,
|
|
|
|
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
|
|
|
|
|
|
|
|
/* remember the primary bucket buffer so we can acquire a cleanup lock on it */
|
|
|
|
if (nblkno == bucket_nblkno)
|
|
|
|
bucket_nbuf = nbuf;
|
|
|
|
|
|
|
|
npage = BufferGetPage(nbuf);
|
|
|
|
npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
|
|
|
|
|
|
|
|
/* Scan each tuple in new page */
|
|
|
|
nmaxoffnum = PageGetMaxOffsetNumber(npage);
|
|
|
|
for (noffnum = FirstOffsetNumber;
|
|
|
|
noffnum <= nmaxoffnum;
|
|
|
|
noffnum = OffsetNumberNext(noffnum))
|
|
|
|
{
|
|
|
|
IndexTuple itup;
|
|
|
|
|
|
|
|
/* Fetch the item's TID and insert it in hash table. */
|
|
|
|
itup = (IndexTuple) PageGetItem(npage,
|
|
|
|
PageGetItemId(npage, noffnum));
|
|
|
|
|
|
|
|
(void) hash_search(tidhtab, &itup->t_tid, HASH_ENTER, &found);
|
|
|
|
|
|
|
|
Assert(!found);
|
|
|
|
}
|
|
|
|
|
|
|
|
nblkno = npageopaque->hasho_nextblkno;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* release our lock without modifying the buffer, and make sure to
|
|
|
|
* retain the pin on the primary bucket.
|
|
|
|
*/
|
|
|
|
if (nbuf == bucket_nbuf)
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(nbuf, BUFFER_LOCK_UNLOCK);
|
2016-11-30 21:39:21 +01:00
|
|
|
else
|
|
|
|
_hash_relbuf(rel, nbuf);
|
|
|
|
|
|
|
|
/* Exit loop if no more overflow pages in new bucket */
|
|
|
|
if (!BlockNumberIsValid(nblkno))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Conditionally get cleanup locks on the old and new buckets to perform
|
|
|
|
* the split operation. If we don't get the cleanup locks, silently give
|
|
|
|
* up; the next insertion into the old bucket will try again to complete the
|
|
|
|
* split.
|
|
|
|
*/
|
|
|
|
if (!ConditionalLockBufferForCleanup(obuf))
|
|
|
|
{
|
|
|
|
hash_destroy(tidhtab);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (!ConditionalLockBufferForCleanup(bucket_nbuf))
|
|
|
|
{
|
2016-12-23 13:14:37 +01:00
|
|
|
LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
|
2016-11-30 21:39:21 +01:00
|
|
|
hash_destroy(tidhtab);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
npage = BufferGetPage(bucket_nbuf);
|
|
|
|
npageopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
|
|
|
|
nbucket = npageopaque->hasho_bucket;
|
|
|
|
|
2017-03-01 10:13:38 +01:00
|
|
|
_hash_splitbucket(rel, metabuf, obucket,
|
|
|
|
nbucket, obuf, bucket_nbuf, tidhtab,
|
|
|
|
maxbucket, highmask, lowmask);
|
2003-09-05 00:06:27 +02:00
|
|
|
|
2017-08-05 01:33:01 +02:00
|
|
|
_hash_dropbuf(rel, bucket_nbuf);
|
Improve hash index bucket split behavior.
Previously, the right to split a bucket was represented by a
heavyweight lock on the page number of the primary bucket page.
Unfortunately, this meant that every scan needed to take a heavyweight
lock on that bucket also, which was bad for concurrency. Instead, use
a cleanup lock on the primary bucket page to indicate the right to
begin a split, so that scans only need to retain a pin on that page,
which is they would have to acquire anyway, and which is also much
cheaper.
In addition to reducing the locking cost, this also avoids locking out
scans and inserts for the entire lifetime of the split: while the new
bucket is being populated with copies of the appropriate tuples from
the old bucket, scans and inserts can happen in parallel. There are
minor concurrency improvements for vacuum operations as well, though
the situation there is still far from ideal.
This patch also removes the unworldly assumption that a split will
never be interrupted. With the new code, a split is done in a series
of small steps and the system can pick up where it left off if it is
interrupted prior to completion. While this patch does not itself add
write-ahead logging for hash indexes, it is clearly a necessary first
step, since one of the things that could interrupt a split is the
removal of electrical power from the machine performing it.
Amit Kapila. I wrote the original design on which this patch is
based, and did a good bit of work on the comments and README through
multiple rounds of review, but all of the code is Amit's. Also
reviewed by Jesper Pedersen, Jeff Janes, and others.
Discussion: http://postgr.es/m/CAA4eK1LfzcZYxLoXS874Ad0+S-ZM60U9bwcyiUZx9mHZ-KCWhw@mail.gmail.com
2016-11-30 21:39:21 +01:00
|
|
|
hash_destroy(tidhtab);
|
1996-07-09 08:22:35 +02:00
|
|
|
}
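
#ifdef NOT_USED
/*
 * Illustrative sketch only, not part of the original file: how an inserter
 * can participate in the "silently give up and retry later" protocol
 * described above.  When the target bucket is still flagged as being split,
 * the caller drops its own lock and asks _hash_finish_split() to try to
 * complete the split before proceeding.  The helper name and parameter list
 * are hypothetical; the calls themselves are the real ones used in this
 * module.
 */
static void
finish_split_if_needed_sketch(Relation rel, Buffer metabuf, Buffer buf,
							  uint32 maxbucket, uint32 highmask,
							  uint32 lowmask)
{
	Page		page = BufferGetPage(buf);
	HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(page);

	if (H_BUCKET_BEING_SPLIT(opaque))
	{
		Bucket		obucket = opaque->hasho_bucket;

		/* drop our own lock; _hash_finish_split takes cleanup locks itself */
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
		_hash_finish_split(rel, metabuf, buf, obucket,
						   maxbucket, highmask, lowmask);
	}
}
#endif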

/*
 *	log_split_page() -- Log the split operation
 *
 *	We log the split operation when the new page in the new bucket gets full,
 *	so we log the entire page.
 *
 *	'buf' must be locked by the caller, which is also responsible for
 *	unlocking it.
 */
static void
log_split_page(Relation rel, Buffer buf)
{
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;

		XLogBeginInsert();

		XLogRegisterBuffer(0, buf, REGBUF_FORCE_IMAGE | REGBUF_STANDARD);

		recptr = XLogInsert(RM_HASH_ID, XLOG_HASH_SPLIT_PAGE);

		PageSetLSN(BufferGetPage(buf), recptr);
	}
}
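
#ifdef NOT_USED
/*
 * Illustrative sketch only, not part of the original file: the protocol a
 * caller is assumed to follow around log_split_page().  The new bucket page
 * is assumed to be exclusively locked on entry; it is modified and marked
 * dirty inside a critical section, and the WAL record is emitted before the
 * lock is released, per the contract in the header comment above.  The
 * helper name is hypothetical.
 */
static void
log_split_page_usage_sketch(Relation rel, Buffer nbuf)
{
	START_CRIT_SECTION();

	/* ... add the batched tuples to BufferGetPage(nbuf) here ... */
	MarkBufferDirty(nbuf);

	/* log the now-full page while the buffer lock is still held */
	log_split_page(rel, nbuf);

	END_CRIT_SECTION();
}
#endif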

/*
 *	_hash_getcachedmetap() -- Returns cached metapage data.
 *
 *	If metabuf is not InvalidBuffer, the caller must hold a pin, but no lock,
 *	on the metapage.  If it is InvalidBuffer, we'll set it before returning
 *	if we have to refresh the cache, and return with a pin but no lock on it;
 *	the caller is responsible for releasing the pin.
 *
 *	We refresh the cache if it's not initialized yet or force_refresh is true.
 */
HashMetaPage
_hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
{
	Page		page;

	Assert(metabuf);
	if (force_refresh || rel->rd_amcache == NULL)
	{
		char	   *cache = NULL;

		/*
		 * It's important that we don't set rd_amcache to an invalid value.
		 * Either MemoryContextAlloc or _hash_getbuf could fail, so don't
		 * install a pointer to the newly-allocated storage in the actual
		 * relcache entry until both have succeeded.
		 */
		if (rel->rd_amcache == NULL)
			cache = MemoryContextAlloc(rel->rd_indexcxt,
									   sizeof(HashMetaPageData));

		/* Read the metapage. */
		if (BufferIsValid(*metabuf))
			LockBuffer(*metabuf, BUFFER_LOCK_SHARE);
		else
			*metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ,
									LH_META_PAGE);
		page = BufferGetPage(*metabuf);

		/* Populate the cache. */
		if (rel->rd_amcache == NULL)
			rel->rd_amcache = cache;
		memcpy(rel->rd_amcache, HashPageGetMeta(page),
			   sizeof(HashMetaPageData));

		/* Release metapage lock, but keep the pin. */
		LockBuffer(*metabuf, BUFFER_LOCK_UNLOCK);
	}

	return (HashMetaPage) rel->rd_amcache;
}
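
#ifdef NOT_USED
/*
 * Illustrative sketch only, not part of the original file: the typical
 * caller pattern for _hash_getcachedmetap().  Start with an invalid buffer,
 * read whatever is needed from the cached copy, and drop the pin if the
 * function had to visit the metapage to (re)fill the cache.
 * _hash_getbucketbuf_from_hashkey() below follows the same pattern.  The
 * helper name is hypothetical.
 */
static uint32
cached_maxbucket_sketch(Relation rel)
{
	Buffer		metabuf = InvalidBuffer;
	HashMetaPage metap;
	uint32		maxbucket;

	metap = _hash_getcachedmetap(rel, &metabuf, false);
	Assert(metap != NULL);
	maxbucket = metap->hashm_maxbucket;

	/* release the metapage pin, if one was taken to refresh the cache */
	if (BufferIsValid(metabuf))
		_hash_dropbuf(rel, metabuf);

	return maxbucket;
}
#endif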

/*
 *	_hash_getbucketbuf_from_hashkey() -- Get the bucket's buffer for the given
 *										 hashkey.
 *
 *	Bucket pages do not move or get removed once they are allocated.  This
 *	gives us an opportunity to use the previously saved metapage contents to
 *	reach the target bucket buffer, instead of reading from the metapage every
 *	time.  This saves one buffer access every time we want to reach the target
 *	bucket buffer, which is a very helpful saving in bufmgr traffic and
 *	contention.
 *
 *	The access type parameter (HASH_READ or HASH_WRITE) indicates whether the
 *	bucket buffer has to be locked for reading or writing.
 *
 *	The out parameter cachedmetap is set to the metapage contents used for
 *	the hashkey-to-bucket-buffer mapping.  Some callers need this info to
 *	reach the old bucket in case of a bucket split; see _hash_doinsert().
 */
Buffer
_hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
								HashMetaPage *cachedmetap)
{
	HashMetaPage metap;
	Buffer		buf;
	Buffer		metabuf = InvalidBuffer;
	Page		page;
	Bucket		bucket;
	BlockNumber blkno;
	HashPageOpaque opaque;

	/* We read from the target bucket buffer, hence locking is a must. */
	Assert(access == HASH_READ || access == HASH_WRITE);

	metap = _hash_getcachedmetap(rel, &metabuf, false);
	Assert(metap != NULL);

	/*
	 * Loop until we get a lock on the correct target bucket.
	 */
	for (;;)
	{
		/*
		 * Compute the target bucket number, and convert to block number.
		 */
		bucket = _hash_hashkey2bucket(hashkey,
									  metap->hashm_maxbucket,
									  metap->hashm_highmask,
									  metap->hashm_lowmask);

		blkno = BUCKET_TO_BLKNO(metap, bucket);

		/* Fetch the primary bucket page for the bucket */
		buf = _hash_getbuf(rel, blkno, access, LH_BUCKET_PAGE);
		page = BufferGetPage(buf);
		opaque = (HashPageOpaque) PageGetSpecialPointer(page);
		Assert(opaque->hasho_bucket == bucket);
		Assert(opaque->hasho_prevblkno != InvalidBlockNumber);

		/*
		 * If this bucket hasn't been split since we cached the metapage,
		 * we're done.  The primary bucket page records, in hasho_prevblkno,
		 * the value of hashm_maxbucket as of the bucket's last split, so a
		 * value larger than our cached hashm_maxbucket means the cache is
		 * stale and must be refreshed.
		 */
		if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
			break;

		/* Drop lock on this buffer, update cached metapage, and retry. */
		_hash_relbuf(rel, buf);
		metap = _hash_getcachedmetap(rel, &metabuf, true);
		Assert(metap != NULL);
	}

	if (BufferIsValid(metabuf))
		_hash_dropbuf(rel, metabuf);

	if (cachedmetap)
		*cachedmetap = metap;

	return buf;
}
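
#ifdef NOT_USED
/*
 * Illustrative sketch only, not part of the original file: how a reader
 * might use _hash_getbucketbuf_from_hashkey() to visit the primary bucket
 * page for a given hash key and then release it.  Callers that care about
 * concurrent splits also keep the returned cachedmetap so they can locate
 * the "old" bucket; see _hash_doinsert().  The helper name is hypothetical.
 */
static void
visit_bucket_sketch(Relation rel, uint32 hashkey)
{
	HashMetaPage cachedmetap;
	Buffer		buf;

	buf = _hash_getbucketbuf_from_hashkey(rel, hashkey, HASH_READ,
										  &cachedmetap);

	/* ... examine the tuples on BufferGetPage(buf) here ... */

	_hash_relbuf(rel, buf);		/* releases both the lock and the pin */
}
#endif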
|