Change internal RelFileNode references to RelFileNumber or RelFileLocator.

We have been using the term RelFileNode to refer to any of three
things: (1) the integer used to name the sequence of files for a
certain relation within the directory set aside for that
tablespace/database combination; (2) that value plus the OIDs of the
tablespace and database; or occasionally (3) the whole series of files
created for a relation based on those values. Using the same name for
more than one thing is confusing.
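
For reference, a minimal sketch of the old struct, with the member
names as they appear in the hunks below (the real header carries more
commentary):

    typedef struct RelFileNode
    {
        Oid         spcNode;    /* tablespace OID */
        Oid         dbNode;     /* database OID */
        Oid         relNode;    /* the per-relation file number, itself
                                 * also called a "relfilenode" -- hence
                                 * the ambiguity */
    } RelFileNode;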

Replace RelFileNode with RelFileNumber when we're talking about just the
single number, i.e. (1) from above, and with RelFileLocator when we're
talking about everything needed to locate a relation's files on disk,
i.e. (2) from above. In the places where we refer to (3) as a
relfilenode, instead refer to "relation storage".
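
Concretely, the new terminology amounts to the following (a sketch
consistent with the renamed members used throughout this diff, not a
verbatim copy of the new headers):

    /* (1) just the number that names a relation's files */
    typedef Oid RelFileNumber;

    /* (2) everything needed to locate a relation's files on disk */
    typedef struct RelFileLocator
    {
        Oid             spcOid;     /* tablespace */
        Oid             dbOid;      /* database */
        RelFileNumber   relNumber;  /* relation file number */
    } RelFileLocator;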

Since there is a ton of SQL code in the world that knows about
pg_class.relfilenode, don't change the name of that column, or of other
SQL-facing things that derive their name from it.

On the other hand, do adjust closely-related internal terminology. For
example, the structure member names dbNode and spcNode appear to be
derived from the fact that the structure itself was called RelFileNode,
so change those to dbOid and spcOid. Likewise, various variables with
names like rnode and relnode get renamed appropriately, according to
how they're being used in context.
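
This pattern repeats throughout the hunks below, for example:

    RelFileNode    rnode;      /* before */
    RelFileLocator rlocator;   /* after */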

Hopefully, this is clearer than before. It is also preparation for
future patches that intend to widen the relfilenumber fields from
their current width of 32 bits. Variables that store a relfilenumber
are now declared as type RelFileNumber rather than type Oid; for now
the two types are identical, but this makes a future change easier.
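
The pg_buffercache hunk below shows the effect on a declaration: the
stored file number changes from a bare Oid to the dedicated typedef,
so a future widening only has to touch the typedef:

    Oid            relfilenode;     /* before */
    RelFileNumber  relfilenumber;   /* after: same width today */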

Dilip Kumar, per an idea from me. Reviewed also by Andres Freund.
I fixed some whitespace issues, changed a couple of words in a
comment, and made one other minor correction.

Discussion: http://postgr.es/m/CA+TgmoamOtXbVAQf9hWFzonUo6bhhjS6toZQd7HZ-pmojtAmag@mail.gmail.com
Discussion: http://postgr.es/m/CA+Tgmobp7+7kmi4gkq7Y+4AM9fTvL+O1oQ4-5gFTT+6Ng-dQ=g@mail.gmail.com
Discussion: http://postgr.es/m/CAFiTN-vTe79M8uDH1yprOU64MNFE+R3ODRuA+JWf27JbhY4hJw@mail.gmail.com
Robert Haas 2022-07-06 11:39:09 -04:00
parent 7775c748db
commit b0a55e4329
138 changed files with 1640 additions and 1606 deletions


@ -179,7 +179,7 @@ blbuildempty(Relation index)
PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
(char *) metapage, true);
log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
BLOOM_METAPAGE_BLKNO, metapage, true);
/*


@ -30,7 +30,7 @@ struct options
{
eary *tables;
eary *oids;
eary *filenodes;
eary *filenumbers;
bool quiet;
bool systables;
@ -125,9 +125,9 @@ get_opts(int argc, char **argv, struct options *my_opts)
my_opts->dbname = pg_strdup(optarg);
break;
/* specify one filenode to show */
/* specify one filenumber to show */
case 'f':
add_one_elt(optarg, my_opts->filenodes);
add_one_elt(optarg, my_opts->filenumbers);
break;
/* host to connect to */
@ -494,7 +494,7 @@ sql_exec_dumpalltables(PGconn *conn, struct options *opts)
}
/*
* Show oid, filenode, name, schema and tablespace for each of the
* Show oid, filenumber, name, schema and tablespace for each of the
* given objects in the current database.
*/
void
@ -504,19 +504,19 @@ sql_exec_searchtables(PGconn *conn, struct options *opts)
char *qualifiers,
*ptr;
char *comma_oids,
*comma_filenodes,
*comma_filenumbers,
*comma_tables;
bool written = false;
char *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" ";
/* get tables qualifiers, whether names, filenodes, or OIDs */
/* get tables qualifiers, whether names, filenumbers, or OIDs */
comma_oids = get_comma_elts(opts->oids);
comma_tables = get_comma_elts(opts->tables);
comma_filenodes = get_comma_elts(opts->filenodes);
comma_filenumbers = get_comma_elts(opts->filenumbers);
/* 80 extra chars for SQL expression */
qualifiers = (char *) pg_malloc(strlen(comma_oids) + strlen(comma_tables) +
strlen(comma_filenodes) + 80);
strlen(comma_filenumbers) + 80);
ptr = qualifiers;
if (opts->oids->num > 0)
@ -524,11 +524,12 @@ sql_exec_searchtables(PGconn *conn, struct options *opts)
ptr += sprintf(ptr, "c.oid IN (%s)", comma_oids);
written = true;
}
if (opts->filenodes->num > 0)
if (opts->filenumbers->num > 0)
{
if (written)
ptr += sprintf(ptr, " OR ");
ptr += sprintf(ptr, "pg_catalog.pg_relation_filenode(c.oid) IN (%s)", comma_filenodes);
ptr += sprintf(ptr, "pg_catalog.pg_relation_filenode(c.oid) IN (%s)",
comma_filenumbers);
written = true;
}
if (opts->tables->num > 0)
@ -539,7 +540,7 @@ sql_exec_searchtables(PGconn *conn, struct options *opts)
}
free(comma_oids);
free(comma_tables);
free(comma_filenodes);
free(comma_filenumbers);
/* now build the query */
todo = psprintf("SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
@ -588,11 +589,11 @@ main(int argc, char **argv)
my_opts->oids = (eary *) pg_malloc(sizeof(eary));
my_opts->tables = (eary *) pg_malloc(sizeof(eary));
my_opts->filenodes = (eary *) pg_malloc(sizeof(eary));
my_opts->filenumbers = (eary *) pg_malloc(sizeof(eary));
my_opts->oids->num = my_opts->oids->alloc = 0;
my_opts->tables->num = my_opts->tables->alloc = 0;
my_opts->filenodes->num = my_opts->filenodes->alloc = 0;
my_opts->filenumbers->num = my_opts->filenumbers->alloc = 0;
/* parse the opts */
get_opts(argc, argv, my_opts);
@ -618,7 +619,7 @@ main(int argc, char **argv)
/* display the given elements in the database */
if (my_opts->oids->num > 0 ||
my_opts->tables->num > 0 ||
my_opts->filenodes->num > 0)
my_opts->filenumbers->num > 0)
{
if (!my_opts->quiet)
printf("From database \"%s\":\n", my_opts->dbname);


@ -26,7 +26,7 @@ PG_MODULE_MAGIC;
typedef struct
{
uint32 bufferid;
Oid relfilenode;
RelFileNumber relfilenumber;
Oid reltablespace;
Oid reldatabase;
ForkNumber forknum;
@ -153,9 +153,9 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
buf_state = LockBufHdr(bufHdr);
fctx->record[i].bufferid = BufferDescriptorGetBuffer(bufHdr);
fctx->record[i].relfilenode = bufHdr->tag.rnode.relNode;
fctx->record[i].reltablespace = bufHdr->tag.rnode.spcNode;
fctx->record[i].reldatabase = bufHdr->tag.rnode.dbNode;
fctx->record[i].relfilenumber = bufHdr->tag.rlocator.relNumber;
fctx->record[i].reltablespace = bufHdr->tag.rlocator.spcOid;
fctx->record[i].reldatabase = bufHdr->tag.rlocator.dbOid;
fctx->record[i].forknum = bufHdr->tag.forkNum;
fctx->record[i].blocknum = bufHdr->tag.blockNum;
fctx->record[i].usagecount = BUF_STATE_GET_USAGECOUNT(buf_state);
@ -209,7 +209,7 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
}
else
{
values[1] = ObjectIdGetDatum(fctx->record[i].relfilenode);
values[1] = ObjectIdGetDatum(fctx->record[i].relfilenumber);
nulls[1] = false;
values[2] = ObjectIdGetDatum(fctx->record[i].reltablespace);
nulls[2] = false;


@ -52,7 +52,7 @@
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenodemap.h"
#include "utils/relfilenumbermap.h"
#include "utils/resowner.h"
#define AUTOPREWARM_FILE "autoprewarm.blocks"
@ -62,7 +62,7 @@ typedef struct BlockInfoRecord
{
Oid database;
Oid tablespace;
Oid filenode;
RelFileNumber filenumber;
ForkNumber forknum;
BlockNumber blocknum;
} BlockInfoRecord;
@ -347,7 +347,7 @@ apw_load_buffers(void)
unsigned forknum;
if (fscanf(file, "%u,%u,%u,%u,%u\n", &blkinfo[i].database,
&blkinfo[i].tablespace, &blkinfo[i].filenode,
&blkinfo[i].tablespace, &blkinfo[i].filenumber,
&forknum, &blkinfo[i].blocknum) != 5)
ereport(ERROR,
(errmsg("autoprewarm block dump file is corrupted at line %d",
@ -494,7 +494,7 @@ autoprewarm_database_main(Datum main_arg)
* relation. Note that rel will be NULL if try_relation_open failed
* previously; in that case, there is nothing to close.
*/
if (old_blk != NULL && old_blk->filenode != blk->filenode &&
if (old_blk != NULL && old_blk->filenumber != blk->filenumber &&
rel != NULL)
{
relation_close(rel, AccessShareLock);
@ -506,13 +506,13 @@ autoprewarm_database_main(Datum main_arg)
* Try to open each new relation, but only once, when we first
* encounter it. If it's been dropped, skip the associated blocks.
*/
if (old_blk == NULL || old_blk->filenode != blk->filenode)
if (old_blk == NULL || old_blk->filenumber != blk->filenumber)
{
Oid reloid;
Assert(rel == NULL);
StartTransactionCommand();
reloid = RelidByRelfilenode(blk->tablespace, blk->filenode);
reloid = RelidByRelfilenumber(blk->tablespace, blk->filenumber);
if (OidIsValid(reloid))
rel = try_relation_open(reloid, AccessShareLock);
@ -527,7 +527,7 @@ autoprewarm_database_main(Datum main_arg)
/* Once per fork, check for fork existence and size. */
if (old_blk == NULL ||
old_blk->filenode != blk->filenode ||
old_blk->filenumber != blk->filenumber ||
old_blk->forknum != blk->forknum)
{
/*
@ -631,9 +631,9 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
if (buf_state & BM_TAG_VALID &&
((buf_state & BM_PERMANENT) || dump_unlogged))
{
block_info_array[num_blocks].database = bufHdr->tag.rnode.dbNode;
block_info_array[num_blocks].tablespace = bufHdr->tag.rnode.spcNode;
block_info_array[num_blocks].filenode = bufHdr->tag.rnode.relNode;
block_info_array[num_blocks].database = bufHdr->tag.rlocator.dbOid;
block_info_array[num_blocks].tablespace = bufHdr->tag.rlocator.spcOid;
block_info_array[num_blocks].filenumber = bufHdr->tag.rlocator.relNumber;
block_info_array[num_blocks].forknum = bufHdr->tag.forkNum;
block_info_array[num_blocks].blocknum = bufHdr->tag.blockNum;
++num_blocks;
@ -671,7 +671,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
ret = fprintf(file, "%u,%u,%u,%u,%u\n",
block_info_array[i].database,
block_info_array[i].tablespace,
block_info_array[i].filenode,
block_info_array[i].filenumber,
(uint32) block_info_array[i].forknum,
block_info_array[i].blocknum);
if (ret < 0)
@ -900,7 +900,7 @@ do { \
* We depend on all records for a particular database being consecutive
* in the dump file; each per-database worker will preload blocks until
* it sees a block for some other database. Sorting by tablespace,
* filenode, forknum, and blocknum isn't critical for correctness, but
* filenumber, forknum, and blocknum isn't critical for correctness, but
* helps us get a sequential I/O pattern.
*/
static int
@ -911,7 +911,7 @@ apw_compare_blockinfo(const void *p, const void *q)
cmp_member_elem(database);
cmp_member_elem(tablespace);
cmp_member_elem(filenode);
cmp_member_elem(filenumber);
cmp_member_elem(forknum);
cmp_member_elem(blocknum);

View File

@ -407,7 +407,7 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
xl_smgr_truncate xlrec;
xlrec.blkno = 0;
xlrec.rnode = rel->rd_node;
xlrec.rlocator = rel->rd_locator;
xlrec.flags = SMGR_TRUNCATE_VM;
XLogBeginInsert();


@ -90,7 +90,7 @@ bool trace_syncscan = false;
*/
typedef struct ss_scan_location_t
{
RelFileNode relfilenode; /* identity of a relation */
RelFileLocator relfilelocator; /* identity of a relation */
BlockNumber location; /* last-reported location in the relation */
} ss_scan_location_t;
@ -115,7 +115,7 @@ typedef struct ss_scan_locations_t
static ss_scan_locations_t *scan_locations;
/* prototypes for internal functions */
static BlockNumber ss_search(RelFileNode relfilenode,
static BlockNumber ss_search(RelFileLocator relfilelocator,
BlockNumber location, bool set);
@ -159,9 +159,9 @@ SyncScanShmemInit(void)
* these invalid entries will fall off the LRU list and get
* replaced with real entries.
*/
item->location.relfilenode.spcNode = InvalidOid;
item->location.relfilenode.dbNode = InvalidOid;
item->location.relfilenode.relNode = InvalidOid;
item->location.relfilelocator.spcOid = InvalidOid;
item->location.relfilelocator.dbOid = InvalidOid;
item->location.relfilelocator.relNumber = InvalidRelFileNumber;
item->location.location = InvalidBlockNumber;
item->prev = (i > 0) ?
@ -176,10 +176,10 @@ SyncScanShmemInit(void)
/*
* ss_search --- search the scan_locations structure for an entry with the
* given relfilenode.
* given relfilelocator.
*
* If "set" is true, the location is updated to the given location. If no
* entry for the given relfilenode is found, it will be created at the head
* entry for the given relfilelocator is found, it will be created at the head
* of the list with the given location, even if "set" is false.
*
* In any case, the location after possible update is returned.
@ -188,7 +188,7 @@ SyncScanShmemInit(void)
* data structure.
*/
static BlockNumber
ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
ss_search(RelFileLocator relfilelocator, BlockNumber location, bool set)
{
ss_lru_item_t *item;
@ -197,7 +197,8 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
{
bool match;
match = RelFileNodeEquals(item->location.relfilenode, relfilenode);
match = RelFileLocatorEquals(item->location.relfilelocator,
relfilelocator);
if (match || item->next == NULL)
{
@ -207,7 +208,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
*/
if (!match)
{
item->location.relfilenode = relfilenode;
item->location.relfilelocator = relfilelocator;
item->location.location = location;
}
else if (set)
@ -255,7 +256,7 @@ ss_get_location(Relation rel, BlockNumber relnblocks)
BlockNumber startloc;
LWLockAcquire(SyncScanLock, LW_EXCLUSIVE);
startloc = ss_search(rel->rd_node, 0, false);
startloc = ss_search(rel->rd_locator, 0, false);
LWLockRelease(SyncScanLock);
/*
@ -281,8 +282,8 @@ ss_get_location(Relation rel, BlockNumber relnblocks)
* ss_report_location --- update the current scan location
*
* Writes an entry into the shared Sync Scan state of the form
* (relfilenode, blocknumber), overwriting any existing entry for the
* same relfilenode.
* (relfilelocator, blocknumber), overwriting any existing entry for the
* same relfilelocator.
*/
void
ss_report_location(Relation rel, BlockNumber location)
@ -309,7 +310,7 @@ ss_report_location(Relation rel, BlockNumber location)
{
if (LWLockConditionalAcquire(SyncScanLock, LW_EXCLUSIVE))
{
(void) ss_search(rel->rd_node, location, true);
(void) ss_search(rel->rd_locator, location, true);
LWLockRelease(SyncScanLock);
}
#ifdef TRACE_SYNCSCAN


@ -470,7 +470,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack,
savedRightLink = GinPageGetOpaque(page)->rightlink;
/* Begin setting up WAL record */
data.node = btree->index->rd_node;
data.locator = btree->index->rd_locator;
data.flags = xlflags;
if (BufferIsValid(childbuf))
{


@ -235,7 +235,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
needWal = RelationNeedsWAL(index);
data.node = index->rd_node;
data.locator = index->rd_locator;
data.ntuples = 0;
data.newRightlink = data.prevTail = InvalidBlockNumber;


@ -688,7 +688,7 @@ ginUpdateStats(Relation index, const GinStatsData *stats, bool is_build)
XLogRecPtr recptr;
ginxlogUpdateMeta data;
data.node = index->rd_node;
data.locator = index->rd_locator;
data.ntuples = 0;
data.newRightlink = data.prevTail = InvalidBlockNumber;
memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));


@ -95,13 +95,13 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber)
{
RelFileNode node;
RelFileLocator locator;
ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buffer, &node, &forknum, &blknum);
BufferGetTag(buffer, &locator, &forknum, &blknum);
elog(ERROR, "failed to add item to index page in %u/%u/%u",
node.spcNode, node.dbNode, node.relNode);
locator.spcOid, locator.dbOid, locator.relNumber);
}
}


@ -462,7 +462,7 @@ gist_indexsortbuild(GISTBuildState *state)
smgrwrite(RelationGetSmgr(state->indexrel), MAIN_FORKNUM, GIST_ROOT_BLKNO,
levelstate->pages[0], true);
if (RelationNeedsWAL(state->indexrel))
log_newpage(&state->indexrel->rd_node, MAIN_FORKNUM, GIST_ROOT_BLKNO,
log_newpage(&state->indexrel->rd_locator, MAIN_FORKNUM, GIST_ROOT_BLKNO,
levelstate->pages[0], true);
pfree(levelstate->pages[0]);
@ -663,7 +663,7 @@ gist_indexsortbuild_flush_ready_pages(GISTBuildState *state)
}
if (RelationNeedsWAL(state->indexrel))
log_newpages(&state->indexrel->rd_node, MAIN_FORKNUM, state->ready_num_pages,
log_newpages(&state->indexrel->rd_locator, MAIN_FORKNUM, state->ready_num_pages,
state->ready_blknos, state->ready_pages, true);
for (int i = 0; i < state->ready_num_pages; i++)


@ -191,11 +191,12 @@ gistRedoDeleteRecord(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rnode);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid,
rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
@ -395,7 +396,7 @@ gistRedoPageReuse(XLogReaderState *record)
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
xlrec->node);
xlrec->locator);
}
void
@ -607,7 +608,7 @@ gistXLogPageReuse(Relation rel, BlockNumber blkno, FullTransactionId latestRemov
*/
/* XLOG stuff */
xlrec_reuse.node = rel->rd_node;
xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
xlrec_reuse.latestRemovedFullXid = latestRemovedXid;


@ -999,10 +999,10 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rlocator);
}
action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer);


@ -428,7 +428,7 @@ _hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
MarkBufferDirty(buf);
if (use_wal)
log_newpage(&rel->rd_node,
log_newpage(&rel->rd_locator,
forkNum,
blkno,
BufferGetPage(buf),
@ -1019,7 +1019,7 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
ovflopaque->hasho_page_id = HASHO_PAGE_ID;
if (RelationNeedsWAL(rel))
log_newpage(&rel->rd_node,
log_newpage(&rel->rd_locator,
MAIN_FORKNUM,
lastblock,
zerobuf.data,


@ -8189,7 +8189,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
* heap_buffer, if necessary.
*/
XLogRecPtr
log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
TransactionId cutoff_xid, uint8 vmflags)
{
xl_heap_visible xlrec;
@ -8454,7 +8454,7 @@ log_heap_new_cid(Relation relation, HeapTuple tup)
Assert(tup->t_tableOid != InvalidOid);
xlrec.top_xid = GetTopTransactionId();
xlrec.target_node = relation->rd_node;
xlrec.target_locator = relation->rd_locator;
xlrec.target_tid = tup->t_self;
/*
@ -8623,18 +8623,18 @@ heap_xlog_prune(XLogReaderState *record)
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_prune *xlrec = (xl_heap_prune *) XLogRecGetData(record);
Buffer buffer;
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber blkno;
XLogRedoAction action;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
/*
* We're about to remove tuples. In Hot Standby mode, ensure that there's
* no queries running for which the removed tuples are still visible.
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
/*
* If we have a full-page image, restore it (using a cleanup lock) and
@ -8694,7 +8694,7 @@ heap_xlog_prune(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
}
@ -8751,9 +8751,9 @@ heap_xlog_vacuum(XLogReaderState *record)
if (BufferIsValid(buffer))
{
Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
UnlockReleaseBuffer(buffer);
@ -8766,7 +8766,7 @@ heap_xlog_vacuum(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
}
@ -8786,11 +8786,11 @@ heap_xlog_visible(XLogReaderState *record)
Buffer vmbuffer = InvalidBuffer;
Buffer buffer;
Page page;
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber blkno;
XLogRedoAction action;
XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
/*
* If there are any Hot Standby transactions running that have an xmin
@ -8802,7 +8802,7 @@ heap_xlog_visible(XLogReaderState *record)
* rather than killing the transaction outright.
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator);
/*
* Read the heap page, if it still exists. If the heap file has dropped or
@ -8865,7 +8865,7 @@ heap_xlog_visible(XLogReaderState *record)
* FSM data is not in the page anyway.
*/
if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
XLogRecordPageWithFreeSpace(rnode, blkno, space);
XLogRecordPageWithFreeSpace(rlocator, blkno, space);
}
/*
@ -8890,7 +8890,7 @@ heap_xlog_visible(XLogReaderState *record)
*/
LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
reln = CreateFakeRelcacheEntry(rnode);
reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, blkno, &vmbuffer);
/*
@ -8933,13 +8933,13 @@ heap_xlog_freeze_page(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
TransactionId latestRemovedXid = cutoff_xid;
TransactionIdRetreat(latestRemovedXid);
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
@ -9007,10 +9007,10 @@ heap_xlog_delete(XLogReaderState *record)
ItemId lp = NULL;
HeapTupleHeader htup;
BlockNumber blkno;
RelFileNode target_node;
RelFileLocator target_locator;
ItemPointerData target_tid;
XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
@ -9020,7 +9020,7 @@ heap_xlog_delete(XLogReaderState *record)
*/
if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(target_node);
Relation reln = CreateFakeRelcacheEntry(target_locator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@ -9086,12 +9086,12 @@ heap_xlog_insert(XLogReaderState *record)
xl_heap_header xlhdr;
uint32 newlen;
Size freespace = 0;
RelFileNode target_node;
RelFileLocator target_locator;
BlockNumber blkno;
ItemPointerData target_tid;
XLogRedoAction action;
XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
@ -9101,7 +9101,7 @@ heap_xlog_insert(XLogReaderState *record)
*/
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(target_node);
Relation reln = CreateFakeRelcacheEntry(target_locator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@ -9184,7 +9184,7 @@ heap_xlog_insert(XLogReaderState *record)
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
}
/*
@ -9195,7 +9195,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
{
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_multi_insert *xlrec;
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber blkno;
Buffer buffer;
Page page;
@ -9217,7 +9217,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
*/
xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
/* check that the mutually exclusive flags are not both set */
Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
@ -9229,7 +9229,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
*/
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
@ -9331,7 +9331,7 @@ heap_xlog_multi_insert(XLogReaderState *record)
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
/*
@ -9342,7 +9342,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
{
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber oldblk;
BlockNumber newblk;
ItemPointerData newtid;
@ -9371,7 +9371,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
oldtup.t_data = NULL;
oldtup.t_len = 0;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
{
/* HOT updates are never done across pages */
@ -9388,7 +9388,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
*/
if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, oldblk, &vmbuffer);
@ -9472,7 +9472,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
*/
if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
{
Relation reln = CreateFakeRelcacheEntry(rnode);
Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, newblk, &vmbuffer);
@ -9606,7 +9606,7 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
* totally accurate anyway.
*/
if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
}
static void
@ -9662,13 +9662,13 @@ heap_xlog_lock(XLogReaderState *record)
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
RelFileNode rnode;
RelFileLocator rlocator;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
Relation reln;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
reln = CreateFakeRelcacheEntry(rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
@ -9735,13 +9735,13 @@ heap_xlog_lock_updated(XLogReaderState *record)
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
RelFileNode rnode;
RelFileLocator rlocator;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
Relation reln;
XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
reln = CreateFakeRelcacheEntry(rnode);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);


@ -566,11 +566,11 @@ tuple_lock_retry:
*/
static void
heapam_relation_set_new_filenode(Relation rel,
const RelFileNode *newrnode,
char persistence,
TransactionId *freezeXid,
MultiXactId *minmulti)
heapam_relation_set_new_filelocator(Relation rel,
const RelFileLocator *newrlocator,
char persistence,
TransactionId *freezeXid,
MultiXactId *minmulti)
{
SMgrRelation srel;
@ -591,7 +591,7 @@ heapam_relation_set_new_filenode(Relation rel,
*/
*minmulti = GetOldestMultiXactId();
srel = RelationCreateStorage(*newrnode, persistence, true);
srel = RelationCreateStorage(*newrlocator, persistence, true);
/*
* If required, set up an init fork for an unlogged table so that it can
@ -608,7 +608,7 @@ heapam_relation_set_new_filenode(Relation rel,
rel->rd_rel->relkind == RELKIND_MATVIEW ||
rel->rd_rel->relkind == RELKIND_TOASTVALUE);
smgrcreate(srel, INIT_FORKNUM, false);
log_smgrcreate(newrnode, INIT_FORKNUM);
log_smgrcreate(newrlocator, INIT_FORKNUM);
smgrimmedsync(srel, INIT_FORKNUM);
}
@ -622,11 +622,11 @@ heapam_relation_nontransactional_truncate(Relation rel)
}
static void
heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
heapam_relation_copy_data(Relation rel, const RelFileLocator *newrlocator)
{
SMgrRelation dstrel;
dstrel = smgropen(*newrnode, rel->rd_backend);
dstrel = smgropen(*newrlocator, rel->rd_backend);
/*
* Since we copy the file directly without looking at the shared buffers,
@ -640,10 +640,10 @@ heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
* Create and copy all forks of the relation, and schedule unlinking of
* old physical files.
*
* NOTE: any conflict in relfilenode value will be caught in
* NOTE: any conflict in relfilenumber value will be caught in
* RelationCreateStorage().
*/
RelationCreateStorage(*newrnode, rel->rd_rel->relpersistence, true);
RelationCreateStorage(*newrlocator, rel->rd_rel->relpersistence, true);
/* copy main fork */
RelationCopyStorage(RelationGetSmgr(rel), dstrel, MAIN_FORKNUM,
@ -664,7 +664,7 @@ heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
if (RelationIsPermanent(rel) ||
(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
forkNum == INIT_FORKNUM))
log_smgrcreate(newrnode, forkNum);
log_smgrcreate(newrlocator, forkNum);
RelationCopyStorage(RelationGetSmgr(rel), dstrel, forkNum,
rel->rd_rel->relpersistence);
}
@ -2569,7 +2569,7 @@ static const TableAmRoutine heapam_methods = {
.tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
.index_delete_tuples = heap_index_delete_tuples,
.relation_set_new_filenode = heapam_relation_set_new_filenode,
.relation_set_new_filelocator = heapam_relation_set_new_filelocator,
.relation_nontransactional_truncate = heapam_relation_nontransactional_truncate,
.relation_copy_data = heapam_relation_copy_data,
.relation_copy_for_cluster = heapam_relation_copy_for_cluster,


@ -318,7 +318,7 @@ end_heap_rewrite(RewriteState state)
if (state->rs_buffer_valid)
{
if (RelationNeedsWAL(state->rs_new_rel))
log_newpage(&state->rs_new_rel->rd_node,
log_newpage(&state->rs_new_rel->rd_locator,
MAIN_FORKNUM,
state->rs_blockno,
state->rs_buffer,
@ -679,7 +679,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
/* XLOG stuff */
if (RelationNeedsWAL(state->rs_new_rel))
log_newpage(&state->rs_new_rel->rd_node,
log_newpage(&state->rs_new_rel->rd_locator,
MAIN_FORKNUM,
state->rs_blockno,
page,
@ -742,7 +742,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
* When doing logical decoding - which relies on using cmin/cmax of catalog
* tuples, via xl_heap_new_cid records - heap rewrites have to log enough
* information to allow the decoding backend to update its internal mapping
* of (relfilenode,ctid) => (cmin, cmax) to be correct for the rewritten heap.
* of (relfilelocator,ctid) => (cmin, cmax) to be correct for the rewritten heap.
*
* For that, every time we find a tuple that's been modified in a catalog
* relation within the xmin horizon of any decoding slot, we log a mapping
@ -1080,9 +1080,9 @@ logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
return;
/* fill out mapping information */
map.old_node = state->rs_old_rel->rd_node;
map.old_locator = state->rs_old_rel->rd_locator;
map.old_tid = old_tid;
map.new_node = state->rs_new_rel->rd_node;
map.new_locator = state->rs_new_rel->rd_locator;
map.new_tid = new_tid;
/* ---


@ -283,7 +283,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
if (XLogRecPtrIsInvalid(recptr))
{
Assert(!InRecovery);
recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
recptr = log_heap_visible(rel->rd_locator, heapBuf, vmBuf,
cutoff_xid, flags);
/*
@ -668,7 +668,7 @@ vm_extend(Relation rel, BlockNumber vm_nblocks)
* to keep checking for creation or extension of the file, which happens
* infrequently.
*/
CacheInvalidateSmgr(reln->smgr_rnode);
CacheInvalidateSmgr(reln->smgr_rlocator);
UnlockRelationForExtension(rel, ExclusiveLock);
}


@ -836,7 +836,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, FullTransactionId safexid)
*/
/* XLOG stuff */
xlrec_reuse.node = rel->rd_node;
xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
xlrec_reuse.latestRemovedFullXid = safexid;


@ -166,7 +166,7 @@ btbuildempty(Relation index)
PageSetChecksumInplace(metapage, BTREE_METAPAGE);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
log_newpage(&RelationGetSmgr(index)->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&RelationGetSmgr(index)->smgr_rlocator.locator, INIT_FORKNUM,
BTREE_METAPAGE, metapage, true);
/*


@ -647,7 +647,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
if (wstate->btws_use_wal)
{
/* We use the XLOG_FPI record type for this */
log_newpage(&wstate->index->rd_node, MAIN_FORKNUM, blkno, page, true);
log_newpage(&wstate->index->rd_locator, MAIN_FORKNUM, blkno, page, true);
}
/*


@ -664,11 +664,11 @@ btree_xlog_delete(XLogReaderState *record)
*/
if (InHotStandby)
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
}
/*
@ -1006,7 +1006,7 @@ btree_xlog_reuse_page(XLogReaderState *record)
if (InHotStandby)
ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
xlrec->node);
xlrec->locator);
}
void


@ -15,7 +15,7 @@
#include "access/generic_xlog.h"
#include "lib/stringinfo.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
/*
* Description of generic xlog record: write page regions that this record


@ -17,7 +17,7 @@
#include "access/ginxlog.h"
#include "access/xlogutils.h"
#include "lib/stringinfo.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
static void
desc_recompress_leaf(StringInfo buf, ginxlogRecompressDataLeaf *insertData)


@ -16,7 +16,7 @@
#include "access/gistxlog.h"
#include "lib/stringinfo.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
static void
out_gistxlogPageUpdate(StringInfo buf, gistxlogPageUpdate *xlrec)
@ -27,8 +27,8 @@ static void
out_gistxlogPageReuse(StringInfo buf, gistxlogPageReuse *xlrec)
{
appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestRemovedXid %u:%u",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode, xlrec->block,
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber, xlrec->block,
EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
XidFromFullTransactionId(xlrec->latestRemovedFullXid));
}


@ -170,9 +170,9 @@ heap2_desc(StringInfo buf, XLogReaderState *record)
xl_heap_new_cid *xlrec = (xl_heap_new_cid *) rec;
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
xlrec->target_node.spcNode,
xlrec->target_node.dbNode,
xlrec->target_node.relNode,
xlrec->target_locator.spcOid,
xlrec->target_locator.dbOid,
xlrec->target_locator.relNumber,
ItemPointerGetBlockNumber(&(xlrec->target_tid)),
ItemPointerGetOffsetNumber(&(xlrec->target_tid)));
appendStringInfo(buf, "; cmin: %u, cmax: %u, combo: %u",


@ -101,8 +101,8 @@ btree_desc(StringInfo buf, XLogReaderState *record)
xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) rec;
appendStringInfo(buf, "rel %u/%u/%u; latestRemovedXid %u:%u",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode,
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber,
EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
XidFromFullTransactionId(xlrec->latestRemovedFullXid));
break;


@ -26,8 +26,8 @@ seq_desc(StringInfo buf, XLogReaderState *record)
if (info == XLOG_SEQ_LOG)
appendStringInfo(buf, "rel %u/%u/%u",
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
xlrec->locator.spcOid, xlrec->locator.dbOid,
xlrec->locator.relNumber);
}
const char *


@ -26,7 +26,7 @@ smgr_desc(StringInfo buf, XLogReaderState *record)
if (info == XLOG_SMGR_CREATE)
{
xl_smgr_create *xlrec = (xl_smgr_create *) rec;
char *path = relpathperm(xlrec->rnode, xlrec->forkNum);
char *path = relpathperm(xlrec->rlocator, xlrec->forkNum);
appendStringInfoString(buf, path);
pfree(path);
@ -34,7 +34,7 @@ smgr_desc(StringInfo buf, XLogReaderState *record)
else if (info == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
char *path = relpathperm(xlrec->rnode, MAIN_FORKNUM);
char *path = relpathperm(xlrec->rlocator, MAIN_FORKNUM);
appendStringInfo(buf, "%s to %u blocks flags %d", path,
xlrec->blkno, xlrec->flags);


@ -73,15 +73,15 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
data += parsed->nsubxacts * sizeof(TransactionId);
}
if (parsed->xinfo & XACT_XINFO_HAS_RELFILENODES)
if (parsed->xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
xl_xact_relfilenodes *xl_relfilenodes = (xl_xact_relfilenodes *) data;
xl_xact_relfilelocators *xl_rellocators = (xl_xact_relfilelocators *) data;
parsed->nrels = xl_relfilenodes->nrels;
parsed->xnodes = xl_relfilenodes->xnodes;
parsed->nrels = xl_rellocators->nrels;
parsed->xlocators = xl_rellocators->xlocators;
data += MinSizeOfXactRelfilenodes;
data += xl_relfilenodes->nrels * sizeof(RelFileNode);
data += MinSizeOfXactRelfileLocators;
data += xl_rellocators->nrels * sizeof(RelFileLocator);
}
if (parsed->xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@ -179,15 +179,15 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
data += parsed->nsubxacts * sizeof(TransactionId);
}
if (parsed->xinfo & XACT_XINFO_HAS_RELFILENODES)
if (parsed->xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
xl_xact_relfilenodes *xl_relfilenodes = (xl_xact_relfilenodes *) data;
xl_xact_relfilelocators *xl_rellocator = (xl_xact_relfilelocators *) data;
parsed->nrels = xl_relfilenodes->nrels;
parsed->xnodes = xl_relfilenodes->xnodes;
parsed->nrels = xl_rellocator->nrels;
parsed->xlocators = xl_rellocator->xlocators;
data += MinSizeOfXactRelfilenodes;
data += xl_relfilenodes->nrels * sizeof(RelFileNode);
data += MinSizeOfXactRelfileLocators;
data += xl_rellocator->nrels * sizeof(RelFileLocator);
}
if (parsed->xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@ -260,11 +260,11 @@ ParsePrepareRecord(uint8 info, xl_xact_prepare *xlrec, xl_xact_parsed_prepare *p
parsed->subxacts = (TransactionId *) bufptr;
bufptr += MAXALIGN(xlrec->nsubxacts * sizeof(TransactionId));
parsed->xnodes = (RelFileNode *) bufptr;
bufptr += MAXALIGN(xlrec->ncommitrels * sizeof(RelFileNode));
parsed->xlocators = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(xlrec->ncommitrels * sizeof(RelFileLocator));
parsed->abortnodes = (RelFileNode *) bufptr;
bufptr += MAXALIGN(xlrec->nabortrels * sizeof(RelFileNode));
parsed->abortlocators = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(xlrec->nabortrels * sizeof(RelFileLocator));
parsed->stats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(xlrec->ncommitstats * sizeof(xl_xact_stats_item));
@ -278,7 +278,7 @@ ParsePrepareRecord(uint8 info, xl_xact_prepare *xlrec, xl_xact_parsed_prepare *p
static void
xact_desc_relations(StringInfo buf, char *label, int nrels,
RelFileNode *xnodes)
RelFileLocator *xlocators)
{
int i;
@ -287,7 +287,7 @@ xact_desc_relations(StringInfo buf, char *label, int nrels,
appendStringInfo(buf, "; %s:", label);
for (i = 0; i < nrels; i++)
{
char *path = relpathperm(xnodes[i], MAIN_FORKNUM);
char *path = relpathperm(xlocators[i], MAIN_FORKNUM);
appendStringInfo(buf, " %s", path);
pfree(path);
@ -340,7 +340,7 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
appendStringInfoString(buf, timestamptz_to_str(xlrec->xact_time));
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xnodes);
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xlocators);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);
xact_desc_stats(buf, "", parsed.nstats, parsed.stats);
@ -376,7 +376,7 @@ xact_desc_abort(StringInfo buf, uint8 info, xl_xact_abort *xlrec, RepOriginId or
appendStringInfoString(buf, timestamptz_to_str(xlrec->xact_time));
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xnodes);
xact_desc_relations(buf, "rels", parsed.nrels, parsed.xlocators);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);
if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN)
@ -400,9 +400,9 @@ xact_desc_prepare(StringInfo buf, uint8 info, xl_xact_prepare *xlrec, RepOriginI
appendStringInfo(buf, "gid %s: ", parsed.twophase_gid);
appendStringInfoString(buf, timestamptz_to_str(parsed.xact_time));
xact_desc_relations(buf, "rels(commit)", parsed.nrels, parsed.xnodes);
xact_desc_relations(buf, "rels(commit)", parsed.nrels, parsed.xlocators);
xact_desc_relations(buf, "rels(abort)", parsed.nabortrels,
parsed.abortnodes);
parsed.abortlocators);
xact_desc_stats(buf, "commit ", parsed.nstats, parsed.stats);
xact_desc_stats(buf, "abort ", parsed.nabortstats, parsed.abortstats);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);


@ -219,12 +219,12 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blk, NULL))
&rlocator, &forknum, &blk, NULL))
continue;
if (detailed_format)
@ -239,7 +239,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
appendStringInfo(buf,
"blkref #%d: rel %u/%u/%u fork %s blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forkNames[forknum],
blk);
@ -299,7 +299,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
appendStringInfo(buf,
", blkref #%d: rel %u/%u/%u fork %s blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forkNames[forknum],
blk);
}
@ -308,7 +308,7 @@ XLogRecGetBlockRefInfo(XLogReaderState *record, bool pretty,
appendStringInfo(buf,
", blkref #%d: rel %u/%u/%u blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
blk);
}


@ -171,7 +171,7 @@ spgbuildempty(Relation index)
PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
(char *) page, true);
log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_METAPAGE_BLKNO, page, true);
/* Likewise for the root page. */
@ -180,7 +180,7 @@ spgbuildempty(Relation index)
PageSetChecksumInplace(page, SPGIST_ROOT_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_ROOT_BLKNO,
(char *) page, true);
log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_ROOT_BLKNO, page, true);
/* Likewise for the null-tuples root page. */
@ -189,7 +189,7 @@ spgbuildempty(Relation index)
PageSetChecksumInplace(page, SPGIST_NULL_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_NULL_BLKNO,
(char *) page, true);
log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_NULL_BLKNO, page, true);
/*


@ -877,11 +877,11 @@ spgRedoVacuumRedirect(XLogReaderState *record)
{
if (TransactionIdIsValid(xldata->newestRedirectXid))
{
RelFileNode node;
RelFileLocator locator;
XLogRecGetBlockTag(record, 0, &node, NULL, NULL);
XLogRecGetBlockTag(record, 0, &locator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->newestRedirectXid,
node);
locator);
}
}


@ -82,7 +82,7 @@ GetTableAmRoutine(Oid amhandler)
Assert(routine->tuple_update != NULL);
Assert(routine->tuple_lock != NULL);
Assert(routine->relation_set_new_filenode != NULL);
Assert(routine->relation_set_new_filelocator != NULL);
Assert(routine->relation_nontransactional_truncate != NULL);
Assert(routine->relation_copy_data != NULL);
Assert(routine->relation_copy_for_cluster != NULL);


@ -557,7 +557,7 @@ void XLogRegisterBuffer(uint8 block_id, Buffer buf, uint8 flags);
XLogRegisterBuffer adds information about a data block to the WAL record.
block_id is an arbitrary number used to identify this page reference in
the redo routine. The information needed to re-find the page at redo -
relfilenode, fork, and block number - are included in the WAL record.
relfilelocator, fork, and block number - are included in the WAL record.
XLogInsert will automatically include a full copy of the page contents, if
this is the first modification of the buffer since the last checkpoint.
@ -692,7 +692,7 @@ by having database restart search for files that don't have any committed
entry in pg_class, but that currently isn't done because of the possibility
of deleting data that is useful for forensic analysis of the crash.
Orphan files are harmless --- at worst they waste a bit of disk space ---
because we check for on-disk collisions when allocating new relfilenode
because we check for on-disk collisions when allocating new relfilenumber
OIDs. So cleaning up isn't really necessary.
3. Deleting a table, which requires an unlink() that could fail.
@ -725,10 +725,10 @@ then restart recovery. This is part of the reason for not writing a WAL
entry until we've successfully done the original action.
Skipping WAL for New RelFileNode
Skipping WAL for New RelFileLocator
--------------------------------
Under wal_level=minimal, if a change modifies a relfilenode that ROLLBACK
Under wal_level=minimal, if a change modifies a relfilenumber that ROLLBACK
would unlink, in-tree access methods write no WAL for that change. Code that
writes WAL without calling RelationNeedsWAL() must check for this case. This
skipping is mandatory. If a WAL-writing change preceded a WAL-skipping change
@ -748,9 +748,9 @@ unconditionally for permanent relations. Under these approaches, the access
method callbacks must not call functions that react to RelationNeedsWAL().
This applies only to WAL records whose replay would modify bytes stored in the
new relfilenode. It does not apply to other records about the relfilenode,
new relfilenumber. It does not apply to other records about the relfilenumber,
such as XLOG_SMGR_CREATE. Because it operates at the level of individual
relfilenodes, RelationNeedsWAL() can differ for tightly-coupled relations.
relfilenumbers, RelationNeedsWAL() can differ for tightly-coupled relations.
Consider "CREATE TABLE t (); BEGIN; ALTER TABLE t ADD c text; ..." in which
ALTER TABLE adds a TOAST relation. The TOAST relation will skip WAL, while
the table owning it will not. ALTER TABLE SET TABLESPACE will cause a table
@ -860,7 +860,7 @@ Changes to a temp table are not WAL-logged, hence could reach disk in
advance of T1's commit, but we don't care since temp table contents don't
survive crashes anyway.
Database writes that skip WAL for new relfilenodes are also safe. In these
Database writes that skip WAL for new relfilenumbers are also safe. In these
cases it's entirely possible for the data to reach disk before T1's commit,
because T1 will fsync it down to disk without any sort of interlock. However,
all these paths are designed to write data that no other transaction can see


@ -126,7 +126,7 @@ worker. This includes:
an index that is currently being rebuilt.
- Active relmapper.c mapping state. This is needed to allow consistent
answers when fetching the current relfilenode for relation oids of
answers when fetching the current relfilenumber for relation oids of
mapped relations.
To prevent unprincipled deadlocks when running in parallel mode, this code


@ -204,7 +204,7 @@ static void RecordTransactionCommitPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
int ninvalmsgs,
@ -215,7 +215,7 @@ static void RecordTransactionAbortPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
const char *gid);
@ -951,8 +951,8 @@ TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
*
* 1. TwoPhaseFileHeader
* 2. TransactionId[] (subtransactions)
* 3. RelFileNode[] (files to be deleted at commit)
* 4. RelFileNode[] (files to be deleted at abort)
* 3. RelFileLocator[] (files to be deleted at commit)
* 4. RelFileLocator[] (files to be deleted at abort)
* 5. SharedInvalidationMessage[] (inval messages to be sent at commit)
* 6. TwoPhaseRecordOnDisk
* 7. ...
@ -1047,8 +1047,8 @@ StartPrepare(GlobalTransaction gxact)
TransactionId xid = gxact->xid;
TwoPhaseFileHeader hdr;
TransactionId *children;
RelFileNode *commitrels;
RelFileNode *abortrels;
RelFileLocator *commitrels;
RelFileLocator *abortrels;
xl_xact_stats_item *abortstats = NULL;
xl_xact_stats_item *commitstats = NULL;
SharedInvalidationMessage *invalmsgs;
@ -1102,12 +1102,12 @@ StartPrepare(GlobalTransaction gxact)
}
if (hdr.ncommitrels > 0)
{
save_state_data(commitrels, hdr.ncommitrels * sizeof(RelFileNode));
save_state_data(commitrels, hdr.ncommitrels * sizeof(RelFileLocator));
pfree(commitrels);
}
if (hdr.nabortrels > 0)
{
save_state_data(abortrels, hdr.nabortrels * sizeof(RelFileNode));
save_state_data(abortrels, hdr.nabortrels * sizeof(RelFileLocator));
pfree(abortrels);
}
if (hdr.ncommitstats > 0)
@ -1489,9 +1489,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
TwoPhaseFileHeader *hdr;
TransactionId latestXid;
TransactionId *children;
RelFileNode *commitrels;
RelFileNode *abortrels;
RelFileNode *delrels;
RelFileLocator *commitrels;
RelFileLocator *abortrels;
RelFileLocator *delrels;
int ndelrels;
xl_xact_stats_item *commitstats;
xl_xact_stats_item *abortstats;
@ -1525,10 +1525,10 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
bufptr += MAXALIGN(hdr->gidlen);
children = (TransactionId *) bufptr;
bufptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId));
commitrels = (RelFileNode *) bufptr;
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
abortrels = (RelFileNode *) bufptr;
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
commitrels = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileLocator));
abortrels = (RelFileLocator *) bufptr;
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileLocator));
commitstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
abortstats = (xl_xact_stats_item *) bufptr;
@ -2100,8 +2100,8 @@ RecoverPreparedTransactions(void)
bufptr += MAXALIGN(hdr->gidlen);
subxids = (TransactionId *) bufptr;
bufptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId));
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileLocator));
bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileLocator));
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item));
bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
@ -2285,7 +2285,7 @@ RecordTransactionCommitPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
int ninvalmsgs,
@ -2383,7 +2383,7 @@ RecordTransactionAbortPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
int nrels,
RelFileNode *rels,
RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
const char *gid)


@ -521,7 +521,7 @@ ForceTransactionIdLimitUpdate(void)
* wide, counter wraparound will occur eventually, and therefore it is unwise
* to assume they are unique unless precautions are taken to make them so.
* Hence, this routine should generally not be used directly. The only direct
* callers should be GetNewOidWithIndex() and GetNewRelFileNode() in
* callers should be GetNewOidWithIndex() and GetNewRelFileNumber() in
* catalog/catalog.c.
*/
Oid


@ -1282,7 +1282,7 @@ RecordTransactionCommit(void)
bool markXidCommitted = TransactionIdIsValid(xid);
TransactionId latestXid = InvalidTransactionId;
int nrels;
RelFileNode *rels;
RelFileLocator *rels;
int nchildren;
TransactionId *children;
int ndroppedstats = 0;
@ -1705,7 +1705,7 @@ RecordTransactionAbort(bool isSubXact)
TransactionId xid = GetCurrentTransactionIdIfAny();
TransactionId latestXid;
int nrels;
RelFileNode *rels;
RelFileLocator *rels;
int ndroppedstats = 0;
xl_xact_stats_item *droppedstats = NULL;
int nchildren;
@ -5586,7 +5586,7 @@ xactGetCommittedChildren(TransactionId **ptr)
XLogRecPtr
XactLogCommitRecord(TimestampTz commit_time,
int nsubxacts, TransactionId *subxacts,
int nrels, RelFileNode *rels,
int nrels, RelFileLocator *rels,
int ndroppedstats, xl_xact_stats_item *droppedstats,
int nmsgs, SharedInvalidationMessage *msgs,
bool relcacheInval,
@ -5597,7 +5597,7 @@ XactLogCommitRecord(TimestampTz commit_time,
xl_xact_xinfo xl_xinfo;
xl_xact_dbinfo xl_dbinfo;
xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
xl_xact_relfilelocators xl_relfilelocators;
xl_xact_stats_items xl_dropped_stats;
xl_xact_invals xl_invals;
xl_xact_twophase xl_twophase;
@ -5651,8 +5651,8 @@ XactLogCommitRecord(TimestampTz commit_time,
if (nrels > 0)
{
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILENODES;
xl_relfilenodes.nrels = nrels;
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILELOCATORS;
xl_relfilelocators.nrels = nrels;
info |= XLR_SPECIAL_REL_UPDATE;
}
@ -5710,12 +5710,12 @@ XactLogCommitRecord(TimestampTz commit_time,
nsubxacts * sizeof(TransactionId));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILENODES)
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
XLogRegisterData((char *) (&xl_relfilenodes),
MinSizeOfXactRelfilenodes);
XLogRegisterData((char *) (&xl_relfilelocators),
MinSizeOfXactRelfileLocators);
XLogRegisterData((char *) rels,
nrels * sizeof(RelFileNode));
nrels * sizeof(RelFileLocator));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@ -5758,7 +5758,7 @@ XactLogCommitRecord(TimestampTz commit_time,
XLogRecPtr
XactLogAbortRecord(TimestampTz abort_time,
int nsubxacts, TransactionId *subxacts,
int nrels, RelFileNode *rels,
int nrels, RelFileLocator *rels,
int ndroppedstats, xl_xact_stats_item *droppedstats,
int xactflags, TransactionId twophase_xid,
const char *twophase_gid)
@ -5766,7 +5766,7 @@ XactLogAbortRecord(TimestampTz abort_time,
xl_xact_abort xlrec;
xl_xact_xinfo xl_xinfo;
xl_xact_subxacts xl_subxacts;
xl_xact_relfilenodes xl_relfilenodes;
xl_xact_relfilelocators xl_relfilelocators;
xl_xact_stats_items xl_dropped_stats;
xl_xact_twophase xl_twophase;
xl_xact_dbinfo xl_dbinfo;
@ -5800,8 +5800,8 @@ XactLogAbortRecord(TimestampTz abort_time,
if (nrels > 0)
{
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILENODES;
xl_relfilenodes.nrels = nrels;
xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILELOCATORS;
xl_relfilelocators.nrels = nrels;
info |= XLR_SPECIAL_REL_UPDATE;
}
@ -5864,12 +5864,12 @@ XactLogAbortRecord(TimestampTz abort_time,
nsubxacts * sizeof(TransactionId));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILENODES)
if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
XLogRegisterData((char *) (&xl_relfilenodes),
MinSizeOfXactRelfilenodes);
XLogRegisterData((char *) (&xl_relfilelocators),
MinSizeOfXactRelfileLocators);
XLogRegisterData((char *) rels,
nrels * sizeof(RelFileNode));
nrels * sizeof(RelFileLocator));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_DROPPED_STATS)
@ -6010,7 +6010,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
XLogFlush(lsn);
/* Make sure files supposed to be dropped are dropped */
DropRelationFiles(parsed->xnodes, parsed->nrels, true);
DropRelationFiles(parsed->xlocators, parsed->nrels, true);
}
if (parsed->nstats > 0)
@ -6121,7 +6121,7 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid,
*/
XLogFlush(lsn);
DropRelationFiles(parsed->xnodes, parsed->nrels, true);
DropRelationFiles(parsed->xlocators, parsed->nrels, true);
}
if (parsed->nstats > 0)
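For reference, the renamed substructure that these hunks register into WAL is declared in src/include/access/xact.h roughly as follows — a sketch inferred from the fields used above (nrels plus a flexible array of locators, with MinSizeOfXactRelfileLocators as the fixed-size prefix); treat the exact layout as illustrative:

typedef struct xl_xact_relfilelocators
{
	int			nrels;			/* number of relations */
	RelFileLocator xlocators[FLEXIBLE_ARRAY_MEMBER];
} xl_xact_relfilelocators;

#define MinSizeOfXactRelfileLocators offsetof(xl_xact_relfilelocators, xlocators)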

View File

@ -70,7 +70,7 @@ typedef struct
{
bool in_use; /* is this slot in use? */
uint8 flags; /* REGBUF_* flags */
RelFileNode rnode; /* identifies the relation and block */
RelFileLocator rlocator; /* identifies the relation and block */
ForkNumber forkno;
BlockNumber block;
Page page; /* page content */
@ -257,7 +257,7 @@ XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
regbuf = &registered_buffers[block_id];
BufferGetTag(buffer, &regbuf->rnode, &regbuf->forkno, &regbuf->block);
BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
regbuf->page = BufferGetPage(buffer);
regbuf->flags = flags;
regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
@ -278,7 +278,7 @@ XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
if (i == block_id || !regbuf_old->in_use)
continue;
Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
regbuf_old->forkno != regbuf->forkno ||
regbuf_old->block != regbuf->block);
}
@ -293,7 +293,7 @@ XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
* shared buffer pool (i.e. when you don't have a Buffer for it).
*/
void
XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum,
BlockNumber blknum, Page page, uint8 flags)
{
registered_buffer *regbuf;
@ -308,7 +308,7 @@ XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
regbuf = &registered_buffers[block_id];
regbuf->rnode = *rnode;
regbuf->rlocator = *rlocator;
regbuf->forkno = forknum;
regbuf->block = blknum;
regbuf->page = page;
@ -331,7 +331,7 @@ XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
if (i == block_id || !regbuf_old->in_use)
continue;
Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
regbuf_old->forkno != regbuf->forkno ||
regbuf_old->block != regbuf->block);
}
@ -768,7 +768,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
rdt_datas_last = regbuf->rdata_tail;
}
if (prev_regbuf && RelFileNodeEquals(regbuf->rnode, prev_regbuf->rnode))
if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
{
samerel = true;
bkpb.fork_flags |= BKPBLOCK_SAME_REL;
@ -793,8 +793,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
}
if (!samerel)
{
memcpy(scratch, &regbuf->rnode, sizeof(RelFileNode));
scratch += sizeof(RelFileNode);
memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
scratch += sizeof(RelFileLocator);
}
memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
scratch += sizeof(BlockNumber);
@ -1031,7 +1031,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
int flags = 0;
PGAlignedBlock copied_buffer;
char *origdata = (char *) BufferGetBlock(buffer);
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forkno;
BlockNumber blkno;
@ -1058,8 +1058,8 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
if (buffer_std)
flags |= REGBUF_STANDARD;
BufferGetTag(buffer, &rnode, &forkno, &blkno);
XLogRegisterBlock(0, &rnode, forkno, blkno, copied_buffer.data, flags);
BufferGetTag(buffer, &rlocator, &forkno, &blkno);
XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT);
}
@ -1080,7 +1080,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
* the unused space to be left out from the WAL record, making it smaller.
*/
XLogRecPtr
log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
log_newpage(RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blkno,
Page page, bool page_std)
{
int flags;
@ -1091,7 +1091,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
flags |= REGBUF_STANDARD;
XLogBeginInsert();
XLogRegisterBlock(0, rnode, forkNum, blkno, page, flags);
XLogRegisterBlock(0, rlocator, forkNum, blkno, page, flags);
recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
/*
@ -1112,7 +1112,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
* because we can write multiple pages in a single WAL record.
*/
void
log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
log_newpages(RelFileLocator *rlocator, ForkNumber forkNum, int num_pages,
BlockNumber *blknos, Page *pages, bool page_std)
{
int flags;
@ -1142,7 +1142,7 @@ log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
nbatch = 0;
while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
{
XLogRegisterBlock(nbatch, rnode, forkNum, blknos[i], pages[i], flags);
XLogRegisterBlock(nbatch, rlocator, forkNum, blknos[i], pages[i], flags);
i++;
nbatch++;
}
@ -1177,16 +1177,16 @@ XLogRecPtr
log_newpage_buffer(Buffer buffer, bool page_std)
{
Page page = BufferGetPage(buffer);
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forkNum;
BlockNumber blkno;
/* Shared buffers should be modified in a critical section. */
Assert(CritSectionCount > 0);
BufferGetTag(buffer, &rnode, &forkNum, &blkno);
BufferGetTag(buffer, &rlocator, &forkNum, &blkno);
return log_newpage(&rnode, forkNum, blkno, page, page_std);
return log_newpage(&rlocator, forkNum, blkno, page, page_std);
}
/*
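Taken together, the signature changes in this file make caller migration mechanical. A minimal hypothetical sketch (variable names illustrative), mirroring log_newpage_buffer() above:

	RelFileLocator rlocator;
	ForkNumber	forknum;
	BlockNumber blkno;

	/* the buffer tag is now a RelFileLocator; pass it straight through */
	BufferGetTag(buffer, &rlocator, &forknum, &blkno);
	(void) log_newpage(&rlocator, forknum, blkno, BufferGetPage(buffer), true);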

View File

@ -138,7 +138,7 @@ struct XLogPrefetcher
dlist_head filter_queue;
/* Book-keeping to avoid repeat prefetches. */
RelFileNode recent_rnode[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
RelFileLocator recent_rlocator[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
BlockNumber recent_block[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
int recent_idx;
@ -161,7 +161,7 @@ struct XLogPrefetcher
*/
typedef struct XLogPrefetcherFilter
{
RelFileNode rnode;
RelFileLocator rlocator;
XLogRecPtr filter_until_replayed;
BlockNumber filter_from_block;
dlist_node link;
@ -187,11 +187,11 @@ typedef struct XLogPrefetchStats
} XLogPrefetchStats;
static inline void XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher,
RelFileNode rnode,
RelFileLocator rlocator,
BlockNumber blockno,
XLogRecPtr lsn);
static inline bool XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher,
RelFileNode rnode,
RelFileLocator rlocator,
BlockNumber blockno);
static inline void XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher,
XLogRecPtr replaying_lsn);
@ -365,7 +365,7 @@ XLogPrefetcherAllocate(XLogReaderState *reader)
{
XLogPrefetcher *prefetcher;
static HASHCTL hash_table_ctl = {
.keysize = sizeof(RelFileNode),
.keysize = sizeof(RelFileLocator),
.entrysize = sizeof(XLogPrefetcherFilter)
};
@ -568,22 +568,23 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
{
xl_dbase_create_file_copy_rec *xlrec =
(xl_dbase_create_file_copy_rec *) record->main_data;
RelFileNode rnode = {InvalidOid, xlrec->db_id, InvalidOid};
RelFileLocator rlocator =
{InvalidOid, xlrec->db_id, InvalidRelFileNumber};
/*
* Don't try to prefetch anything in this database until
* it has been created, or we might confuse the blocks of
* different generations, if a database OID or relfilenode
* is reused. It's also more efficient than discovering
* that relations don't exist on disk yet with ENOENT
* errors.
* different generations, if a database OID or
* relfilenumber is reused. It's also more efficient than
* discovering that relations don't exist on disk yet with
* ENOENT errors.
*/
XLogPrefetcherAddFilter(prefetcher, rnode, 0, record->lsn);
XLogPrefetcherAddFilter(prefetcher, rlocator, 0, record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in database %u until %X/%X is replayed due to raw file copy",
rnode.dbNode,
rlocator.dbOid,
LSN_FORMAT_ARGS(record->lsn));
#endif
}
@ -601,19 +602,19 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
* Don't prefetch anything for this whole relation
* until it has been created. Otherwise we might
* confuse the blocks of different generations, if a
* relfilenode is reused. This also avoids the need
* relfilenumber is reused. This also avoids the need
* to discover the problem via extra syscalls that
* report ENOENT.
*/
XLogPrefetcherAddFilter(prefetcher, xlrec->rnode, 0,
XLogPrefetcherAddFilter(prefetcher, xlrec->rlocator, 0,
record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u until %X/%X is replayed, which creates the relation",
xlrec->rnode.spcNode,
xlrec->rnode.dbNode,
xlrec->rnode.relNode,
xlrec->rlocator.spcOid,
xlrec->rlocator.dbOid,
xlrec->rlocator.relNumber,
LSN_FORMAT_ARGS(record->lsn));
#endif
}
@ -627,16 +628,16 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
* Don't consider prefetching anything in the truncated
* range until the truncation has been performed.
*/
XLogPrefetcherAddFilter(prefetcher, xlrec->rnode,
XLogPrefetcherAddFilter(prefetcher, xlrec->rlocator,
xlrec->blkno,
record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, which truncates the relation",
xlrec->rnode.spcNode,
xlrec->rnode.dbNode,
xlrec->rnode.relNode,
xlrec->rlocator.spcOid,
xlrec->rlocator.dbOid,
xlrec->rlocator.relNumber,
xlrec->blkno,
LSN_FORMAT_ARGS(record->lsn));
#endif
@ -688,7 +689,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
}
/* Should we skip prefetching this block due to a filter? */
if (XLogPrefetcherIsFiltered(prefetcher, block->rnode, block->blkno))
if (XLogPrefetcherIsFiltered(prefetcher, block->rlocator, block->blkno))
{
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
@ -698,7 +699,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
for (int i = 0; i < XLOGPREFETCHER_SEQ_WINDOW_SIZE; ++i)
{
if (block->blkno == prefetcher->recent_block[i] &&
RelFileNodeEquals(block->rnode, prefetcher->recent_rnode[i]))
RelFileLocatorEquals(block->rlocator, prefetcher->recent_rlocator[i]))
{
/*
* XXX If we also remembered where it was, we could set
@ -709,7 +710,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
return LRQ_NEXT_NO_IO;
}
}
prefetcher->recent_rnode[prefetcher->recent_idx] = block->rnode;
prefetcher->recent_rlocator[prefetcher->recent_idx] = block->rlocator;
prefetcher->recent_block[prefetcher->recent_idx] = block->blkno;
prefetcher->recent_idx =
(prefetcher->recent_idx + 1) % XLOGPREFETCHER_SEQ_WINDOW_SIZE;
@ -719,7 +720,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
* same relation (with some scheme to handle invalidations
* safely), but for now we'll call smgropen() every time.
*/
reln = smgropen(block->rnode, InvalidBackendId);
reln = smgropen(block->rlocator, InvalidBackendId);
/*
* If the relation file doesn't exist on disk, for example because
@ -733,12 +734,12 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing all prefetch in relation %u/%u/%u until %X/%X is replayed, because the relation does not exist on disk",
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
LSN_FORMAT_ARGS(record->lsn));
#endif
XLogPrefetcherAddFilter(prefetcher, block->rnode, 0,
XLogPrefetcherAddFilter(prefetcher, block->rlocator, 0,
record->lsn);
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
@ -754,13 +755,13 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, because the relation is too small",
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
block->blkno,
LSN_FORMAT_ARGS(record->lsn));
#endif
XLogPrefetcherAddFilter(prefetcher, block->rnode, block->blkno,
XLogPrefetcherAddFilter(prefetcher, block->rlocator, block->blkno,
record->lsn);
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
@ -793,9 +794,9 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
*/
elog(ERROR,
"could not prefetch relation %u/%u/%u block %u",
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
block->blkno);
}
}
@ -852,17 +853,17 @@ pg_stat_get_recovery_prefetch(PG_FUNCTION_ARGS)
}
/*
* Don't prefetch any blocks >= 'blockno' from a given 'rnode', until 'lsn'
* Don't prefetch any blocks >= 'blockno' from a given 'rlocator', until 'lsn'
* has been replayed.
*/
static inline void
XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileNode rnode,
XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
BlockNumber blockno, XLogRecPtr lsn)
{
XLogPrefetcherFilter *filter;
bool found;
filter = hash_search(prefetcher->filter_table, &rnode, HASH_ENTER, &found);
filter = hash_search(prefetcher->filter_table, &rlocator, HASH_ENTER, &found);
if (!found)
{
/*
@ -875,9 +876,10 @@ XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileNode rnode,
else
{
/*
* We were already filtering this rnode. Extend the filter's lifetime
* to cover this WAL record, but leave the lower of the block numbers
* there because we don't want to have to track individual blocks.
* We were already filtering this rlocator. Extend the filter's
* lifetime to cover this WAL record, but leave the lower of the block
* numbers there because we don't want to have to track individual
* blocks.
*/
filter->filter_until_replayed = lsn;
dlist_delete(&filter->link);
@ -890,7 +892,7 @@ XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileNode rnode,
* Have we replayed any records that caused us to begin filtering a block
* range? That means that relations should have been created, extended or
* dropped as required, so we can stop filtering out accesses to a given
* relfilenode.
* relfilenumber.
*/
static inline void
XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher, XLogRecPtr replaying_lsn)
@ -913,7 +915,7 @@ XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher, XLogRecPtr replaying_l
* Check if a given block should be skipped due to a filter.
*/
static inline bool
XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileNode rnode,
XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
BlockNumber blockno)
{
/*
@ -925,13 +927,13 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileNode rnode,
XLogPrefetcherFilter *filter;
/* See if the block range is filtered. */
filter = hash_search(prefetcher->filter_table, &rnode, HASH_FIND, NULL);
filter = hash_search(prefetcher->filter_table, &rlocator, HASH_FIND, NULL);
if (filter && filter->filter_from_block <= blockno)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (blocks >= %u filtered)",
rnode.spcNode, rnode.dbNode, rnode.relNode, blockno,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed),
filter->filter_from_block);
#endif
@ -939,15 +941,15 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileNode rnode,
}
/* See if the whole database is filtered. */
rnode.relNode = InvalidOid;
rnode.spcNode = InvalidOid;
filter = hash_search(prefetcher->filter_table, &rnode, HASH_FIND, NULL);
rlocator.relNumber = InvalidRelFileNumber;
rlocator.spcOid = InvalidOid;
filter = hash_search(prefetcher->filter_table, &rlocator, HASH_FIND, NULL);
if (filter)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (whole database)",
rnode.spcNode, rnode.dbNode, rnode.relNode, blockno,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed));
#endif
return true;
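These hunks depend on the renamed locator type and its equality macro. As a sketch for orientation (the definitions are introduced elsewhere in this patch, in the header renamed to relfilelocator.h; exact formatting illustrative):

typedef struct RelFileLocator
{
	Oid			spcOid;			/* tablespace */
	Oid			dbOid;			/* database */
	RelFileNumber relNumber;	/* relation */
} RelFileLocator;

#define RelFileLocatorEquals(locator1, locator2) \
	((locator1).relNumber == (locator2).relNumber && \
	 (locator1).dbOid == (locator2).dbOid && \
	 (locator1).spcOid == (locator2).spcOid)

RelFileNumber is currently a typedef for Oid, so InvalidRelFileNumber is the same value as InvalidOid; keeping them as distinct names is what lets the field be widened later.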

View File

@ -1638,7 +1638,7 @@ DecodeXLogRecord(XLogReaderState *state,
char *out;
uint32 remaining;
uint32 datatotal;
RelFileNode *rnode = NULL;
RelFileLocator *rlocator = NULL;
uint8 block_id;
decoded->header = *record;
@ -1823,12 +1823,12 @@ DecodeXLogRecord(XLogReaderState *state,
}
if (!(fork_flags & BKPBLOCK_SAME_REL))
{
COPY_HEADER_FIELD(&blk->rnode, sizeof(RelFileNode));
rnode = &blk->rnode;
COPY_HEADER_FIELD(&blk->rlocator, sizeof(RelFileLocator));
rlocator = &blk->rlocator;
}
else
{
if (rnode == NULL)
if (rlocator == NULL)
{
report_invalid_record(state,
"BKPBLOCK_SAME_REL set but no previous rel at %X/%X",
@ -1836,7 +1836,7 @@ DecodeXLogRecord(XLogReaderState *state,
goto err;
}
blk->rnode = *rnode;
blk->rlocator = *rlocator;
}
COPY_HEADER_FIELD(&blk->blkno, sizeof(BlockNumber));
}
@ -1926,10 +1926,11 @@ err:
*/
void
XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,
RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum)
{
if (!XLogRecGetBlockTagExtended(record, block_id, rnode, forknum, blknum,
NULL))
if (!XLogRecGetBlockTagExtended(record, block_id, rlocator, forknum,
blknum, NULL))
{
#ifndef FRONTEND
elog(ERROR, "failed to locate backup block with ID %d in WAL record",
@ -1945,13 +1946,13 @@ XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,
* Returns information about the block that a block reference refers to,
* optionally including the buffer that the block may already be in.
*
* If the WAL record contains a block reference with the given ID, *rnode,
* If the WAL record contains a block reference with the given ID, *rlocator,
* *forknum, *blknum and *prefetch_buffer are filled in (if not NULL), and
* the function returns true. Otherwise it returns false.
*/
bool
XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id,
RelFileNode *rnode, ForkNumber *forknum,
RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum,
Buffer *prefetch_buffer)
{
@ -1961,8 +1962,8 @@ XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id,
return false;
bkpb = &record->record->blocks[block_id];
if (rnode)
*rnode = bkpb->rnode;
if (rlocator)
*rlocator = bkpb->rlocator;
if (forknum)
*forknum = bkpb->forknum;
if (blknum)
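A hypothetical redo-side consumer of the renamed accessor, assuming block reference 0 exists (XLogRecGetBlockTag errors out in the backend when it does not; use the Extended variant to probe optional references):

	RelFileLocator rlocator;
	ForkNumber	forknum;
	BlockNumber blkno;

	/* fetch the locator/fork/block for block reference 0 */
	XLogRecGetBlockTag(record, 0, &rlocator, &forknum, &blkno);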

View File

@ -2166,24 +2166,26 @@ xlog_block_info(StringInfo buf, XLogReaderState *record)
/* decode block references */
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blk, NULL))
&rlocator, &forknum, &blk, NULL))
continue;
if (forknum != MAIN_FORKNUM)
appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, fork %u, blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid,
rlocator.relNumber,
forknum,
blk);
else
appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, blk %u",
block_id,
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid,
rlocator.relNumber,
blk);
if (XLogRecHasBlockImage(record, block_id))
appendStringInfoString(buf, " FPW");
@ -2285,7 +2287,7 @@ static void
verifyBackupPageConsistency(XLogReaderState *record)
{
RmgrData rmgr = GetRmgr(XLogRecGetRmid(record));
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
int block_id;
@ -2302,7 +2304,7 @@ verifyBackupPageConsistency(XLogReaderState *record)
Page page;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blkno, NULL))
&rlocator, &forknum, &blkno, NULL))
{
/*
* WAL record doesn't contain a block reference with the given id.
@ -2327,7 +2329,7 @@ verifyBackupPageConsistency(XLogReaderState *record)
* Read the contents from the current buffer and store it in a
* temporary page.
*/
buf = XLogReadBufferExtended(rnode, forknum, blkno,
buf = XLogReadBufferExtended(rlocator, forknum, blkno,
RBM_NORMAL_NO_LOG,
InvalidBuffer);
if (!BufferIsValid(buf))
@ -2377,7 +2379,7 @@ verifyBackupPageConsistency(XLogReaderState *record)
{
elog(FATAL,
"inconsistent page found, rel %u/%u/%u, forknum %u, blkno %u",
rnode.spcNode, rnode.dbNode, rnode.relNode,
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forknum, blkno);
}
}

View File

@ -67,7 +67,7 @@ HotStandbyState standbyState = STANDBY_DISABLED;
*/
typedef struct xl_invalid_page_key
{
RelFileNode node; /* the relation */
RelFileLocator locator; /* the relation */
ForkNumber forkno; /* the fork number */
BlockNumber blkno; /* the page */
} xl_invalid_page_key;
@ -86,10 +86,10 @@ static int read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPa
/* Report a reference to an invalid page */
static void
report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno,
report_invalid_page(int elevel, RelFileLocator locator, ForkNumber forkno,
BlockNumber blkno, bool present)
{
char *path = relpathperm(node, forkno);
char *path = relpathperm(locator, forkno);
if (present)
elog(elevel, "page %u of relation %s is uninitialized",
@ -102,7 +102,7 @@ report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno,
/* Log a reference to an invalid page */
static void
log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno,
bool present)
{
xl_invalid_page_key key;
@ -119,7 +119,7 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
*/
if (reachedConsistency)
{
report_invalid_page(WARNING, node, forkno, blkno, present);
report_invalid_page(WARNING, locator, forkno, blkno, present);
elog(ignore_invalid_pages ? WARNING : PANIC,
"WAL contains references to invalid pages");
}
@ -130,7 +130,7 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
* something about the XLOG record that generated the reference).
*/
if (message_level_is_interesting(DEBUG1))
report_invalid_page(DEBUG1, node, forkno, blkno, present);
report_invalid_page(DEBUG1, locator, forkno, blkno, present);
if (invalid_page_tab == NULL)
{
@ -147,7 +147,7 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
}
/* we currently assume xl_invalid_page_key contains no padding */
key.node = node;
key.locator = locator;
key.forkno = forkno;
key.blkno = blkno;
hentry = (xl_invalid_page *)
@ -166,7 +166,8 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/* Forget any invalid pages >= minblkno, because they've been dropped */
static void
forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
BlockNumber minblkno)
{
HASH_SEQ_STATUS status;
xl_invalid_page *hentry;
@ -178,13 +179,13 @@ forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
if (RelFileNodeEquals(hentry->key.node, node) &&
if (RelFileLocatorEquals(hentry->key.locator, locator) &&
hentry->key.forkno == forkno &&
hentry->key.blkno >= minblkno)
{
if (message_level_is_interesting(DEBUG2))
{
char *path = relpathperm(hentry->key.node, forkno);
char *path = relpathperm(hentry->key.locator, forkno);
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
@ -213,11 +214,11 @@ forget_invalid_pages_db(Oid dbid)
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
if (hentry->key.node.dbNode == dbid)
if (hentry->key.locator.dbOid == dbid)
{
if (message_level_is_interesting(DEBUG2))
{
char *path = relpathperm(hentry->key.node, hentry->key.forkno);
char *path = relpathperm(hentry->key.locator, hentry->key.forkno);
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
@ -261,7 +262,7 @@ XLogCheckInvalidPages(void)
*/
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
hentry->key.blkno, hentry->present);
foundone = true;
}
@ -356,7 +357,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
Buffer *buf)
{
XLogRecPtr lsn = record->EndRecPtr;
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
Buffer prefetch_buffer;
@ -364,7 +365,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
bool zeromode;
bool willinit;
if (!XLogRecGetBlockTagExtended(record, block_id, &rnode, &forknum, &blkno,
if (!XLogRecGetBlockTagExtended(record, block_id, &rlocator, &forknum, &blkno,
&prefetch_buffer))
{
/* Caller specified a bogus block_id */
@ -387,7 +388,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
if (XLogRecBlockImageApply(record, block_id))
{
Assert(XLogRecHasBlockImage(record, block_id));
*buf = XLogReadBufferExtended(rnode, forknum, blkno,
*buf = XLogReadBufferExtended(rlocator, forknum, blkno,
get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK,
prefetch_buffer);
page = BufferGetPage(*buf);
@ -418,7 +419,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
}
else
{
*buf = XLogReadBufferExtended(rnode, forknum, blkno, mode, prefetch_buffer);
*buf = XLogReadBufferExtended(rlocator, forknum, blkno, mode, prefetch_buffer);
if (BufferIsValid(*buf))
{
if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
@ -468,7 +469,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
* they will be invisible to tools that need to know which pages are modified.
*/
Buffer
XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
XLogReadBufferExtended(RelFileLocator rlocator, ForkNumber forknum,
BlockNumber blkno, ReadBufferMode mode,
Buffer recent_buffer)
{
@ -481,14 +482,14 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
/* Do we have a clue where the buffer might be already? */
if (BufferIsValid(recent_buffer) &&
mode == RBM_NORMAL &&
ReadRecentBuffer(rnode, forknum, blkno, recent_buffer))
ReadRecentBuffer(rlocator, forknum, blkno, recent_buffer))
{
buffer = recent_buffer;
goto recent_buffer_fast_path;
}
/* Open the relation at smgr level */
smgr = smgropen(rnode, InvalidBackendId);
smgr = smgropen(rlocator, InvalidBackendId);
/*
* Create the target file if it doesn't already exist. This lets us cope
@ -505,7 +506,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
if (blkno < lastblock)
{
/* page exists in file */
buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
mode, NULL, true);
}
else
@ -513,7 +514,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
/* hm, page doesn't exist in file */
if (mode == RBM_NORMAL)
{
log_invalid_page(rnode, forknum, blkno, false);
log_invalid_page(rlocator, forknum, blkno, false);
return InvalidBuffer;
}
if (mode == RBM_NORMAL_NO_LOG)
@ -530,7 +531,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
}
buffer = ReadBufferWithoutRelcache(rnode, forknum,
buffer = ReadBufferWithoutRelcache(rlocator, forknum,
P_NEW, mode, NULL, true);
}
while (BufferGetBlockNumber(buffer) < blkno);
@ -540,7 +541,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
mode, NULL, true);
}
}
@ -559,7 +560,7 @@ recent_buffer_fast_path:
if (PageIsNew(page))
{
ReleaseBuffer(buffer);
log_invalid_page(rnode, forknum, blkno, true);
log_invalid_page(rlocator, forknum, blkno, true);
return InvalidBuffer;
}
}
@ -594,7 +595,7 @@ typedef FakeRelCacheEntryData *FakeRelCacheEntry;
* Caller must free the returned entry with FreeFakeRelcacheEntry().
*/
Relation
CreateFakeRelcacheEntry(RelFileNode rnode)
CreateFakeRelcacheEntry(RelFileLocator rlocator)
{
FakeRelCacheEntry fakeentry;
Relation rel;
@ -604,7 +605,7 @@ CreateFakeRelcacheEntry(RelFileNode rnode)
rel = (Relation) fakeentry;
rel->rd_rel = &fakeentry->pgc;
rel->rd_node = rnode;
rel->rd_locator = rlocator;
/*
* We will never be working with temp rels during recovery or while
@ -615,18 +616,18 @@ CreateFakeRelcacheEntry(RelFileNode rnode)
/* It must be a permanent table here */
rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
/* We don't know the name of the relation; use relfilenode instead */
sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
/* We don't know the name of the relation; use relfilenumber instead */
sprintf(RelationGetRelationName(rel), "%u", rlocator.relNumber);
/*
* We set up the lockRelId in case anything tries to lock the dummy
* relation. Note that this is fairly bogus since relNode may be
* relation. Note that this is fairly bogus since relNumber may be
* different from the relation's OID. It shouldn't really matter though.
* In recovery, we are running by ourselves and can't have any lock
* conflicts. While syncing, we already hold AccessExclusiveLock.
*/
rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
rel->rd_lockInfo.lockRelId.dbId = rlocator.dbOid;
rel->rd_lockInfo.lockRelId.relId = rlocator.relNumber;
rel->rd_smgr = NULL;
@ -652,9 +653,9 @@ FreeFakeRelcacheEntry(Relation fakerel)
* any open "invalid-page" records for the relation.
*/
void
XLogDropRelation(RelFileNode rnode, ForkNumber forknum)
XLogDropRelation(RelFileLocator rlocator, ForkNumber forknum)
{
forget_invalid_pages(rnode, forknum, 0);
forget_invalid_pages(rlocator, forknum, 0);
}
/*
@ -682,10 +683,10 @@ XLogDropDatabase(Oid dbid)
* We need to clean up any open "invalid-page" records for the dropped pages.
*/
void
XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
XLogTruncateRelation(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber nblocks)
{
forget_invalid_pages(rnode, forkNum, nblocks);
forget_invalid_pages(rlocator, forkNum, nblocks);
}
/*
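A sketch of the recovery-side pattern these helpers serve, as smgr_redo() uses it later in this patch (the xlrec fields are assumed to come from an xl_smgr_truncate record; the body is elided):

	/* Relation-shaped handle for redo code that expects a Relation */
	Relation	rel = CreateFakeRelcacheEntry(xlrec->rlocator);

	/* ... use rel with FSM/visibility-map code during replay ... */

	FreeFakeRelcacheEntry(rel);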

View File

@ -287,9 +287,9 @@ Boot_DeclareIndexStmt:
stmt->excludeOpNames = NIL;
stmt->idxcomment = NULL;
stmt->indexOid = InvalidOid;
stmt->oldNode = InvalidOid;
stmt->oldNumber = InvalidRelFileNumber;
stmt->oldCreateSubid = InvalidSubTransactionId;
stmt->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
stmt->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
stmt->unique = false;
stmt->primary = false;
stmt->isconstraint = false;
@ -339,9 +339,9 @@ Boot_DeclareUniqueIndexStmt:
stmt->excludeOpNames = NIL;
stmt->idxcomment = NULL;
stmt->indexOid = InvalidOid;
stmt->oldNode = InvalidOid;
stmt->oldNumber = InvalidRelFileNumber;
stmt->oldCreateSubid = InvalidSubTransactionId;
stmt->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
stmt->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
stmt->unique = true;
stmt->primary = false;
stmt->isconstraint = false;

View File

@ -481,14 +481,14 @@ GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn)
}
/*
* GetNewRelFileNode
* Generate a new relfilenode number that is unique within the
* GetNewRelFileNumber
* Generate a new relfilenumber that is unique within the
* database of the given tablespace.
*
* If the relfilenode will also be used as the relation's OID, pass the
* If the relfilenumber will also be used as the relation's OID, pass the
* opened pg_class catalog, and this routine will guarantee that the result
* is also an unused OID within pg_class. If the result is to be used only
* as a relfilenode for an existing relation, pass NULL for pg_class.
* as a relfilenumber for an existing relation, pass NULL for pg_class.
*
* As with GetNewOidWithIndex(), there is some theoretical risk of a race
* condition, but it doesn't seem worth worrying about.
@ -496,17 +496,17 @@ GetNewOidWithIndex(Relation relation, Oid indexId, AttrNumber oidcolumn)
* Note: we don't support using this in bootstrap mode. All relations
* created by bootstrap have preassigned OIDs, so there's no need.
*/
Oid
GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
RelFileNumber
GetNewRelFileNumber(Oid reltablespace, Relation pg_class, char relpersistence)
{
RelFileNodeBackend rnode;
RelFileLocatorBackend rlocator;
char *rpath;
bool collides;
BackendId backend;
/*
* If we ever get here during pg_upgrade, there's something wrong; all
* relfilenode assignments during a binary-upgrade run should be
* relfilenumber assignments during a binary-upgrade run should be
* determined by commands in the dump script.
*/
Assert(!IsBinaryUpgrade);
@ -522,19 +522,21 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
break;
default:
elog(ERROR, "invalid relpersistence: %c", relpersistence);
return InvalidOid; /* placate compiler */
return InvalidRelFileNumber; /* placate compiler */
}
/* This logic should match RelationInitPhysicalAddr */
rnode.node.spcNode = reltablespace ? reltablespace : MyDatabaseTableSpace;
rnode.node.dbNode = (rnode.node.spcNode == GLOBALTABLESPACE_OID) ? InvalidOid : MyDatabaseId;
rlocator.locator.spcOid = reltablespace ? reltablespace : MyDatabaseTableSpace;
rlocator.locator.dbOid =
(rlocator.locator.spcOid == GLOBALTABLESPACE_OID) ?
InvalidOid : MyDatabaseId;
/*
* The relpath will vary based on the backend ID, so we must initialize
* that properly here to make sure that any collisions based on filename
* are properly detected.
*/
rnode.backend = backend;
rlocator.backend = backend;
do
{
@ -542,13 +544,13 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
/* Generate the OID */
if (pg_class)
rnode.node.relNode = GetNewOidWithIndex(pg_class, ClassOidIndexId,
Anum_pg_class_oid);
rlocator.locator.relNumber = GetNewOidWithIndex(pg_class, ClassOidIndexId,
Anum_pg_class_oid);
else
rnode.node.relNode = GetNewObjectId();
rlocator.locator.relNumber = GetNewObjectId();
/* Check for existing file of same name */
rpath = relpath(rnode, MAIN_FORKNUM);
rpath = relpath(rlocator, MAIN_FORKNUM);
if (access(rpath, F_OK) == 0)
{
@ -570,7 +572,7 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
pfree(rpath);
} while (collides);
return rnode.node.relNode;
return rlocator.locator.relNumber;
}
/*
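A hypothetical caller sketch: allocating a replacement relfilenumber for an existing relation, so pg_class is passed as NULL and only the on-disk filename-collision check applies (rel is assumed to be an open Relation):

	RelFileNumber newrelfilenumber;

	newrelfilenumber = GetNewRelFileNumber(rel->rd_rel->reltablespace,
										   NULL,	/* not also a pg_class OID */
										   rel->rd_rel->relpersistence);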

View File

@ -77,9 +77,9 @@
/* Potentially set by pg_upgrade_support functions */
Oid binary_upgrade_next_heap_pg_class_oid = InvalidOid;
Oid binary_upgrade_next_heap_pg_class_relfilenode = InvalidOid;
Oid binary_upgrade_next_toast_pg_class_oid = InvalidOid;
Oid binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid;
RelFileNumber binary_upgrade_next_heap_pg_class_relfilenumber = InvalidRelFileNumber;
RelFileNumber binary_upgrade_next_toast_pg_class_relfilenumber = InvalidRelFileNumber;
static void AddNewRelationTuple(Relation pg_class_desc,
Relation new_rel_desc,
@ -273,7 +273,7 @@ SystemAttributeByName(const char *attname)
* heap_create - Create an uncataloged heap relation
*
* Note API change: the caller must now always provide the OID
* to use for the relation. The relfilenode may be (and in
* to use for the relation. The relfilenumber may be (and in
* the simplest cases is) left unspecified.
*
* create_storage indicates whether or not to create the storage.
@ -289,7 +289,7 @@ heap_create(const char *relname,
Oid relnamespace,
Oid reltablespace,
Oid relid,
Oid relfilenode,
RelFileNumber relfilenumber,
Oid accessmtd,
TupleDesc tupDesc,
char relkind,
@ -341,11 +341,11 @@ heap_create(const char *relname,
else
{
/*
* If relfilenode is unspecified by the caller then create storage
* If relfilenumber is unspecified by the caller then create storage
* with oid same as relid.
*/
if (!OidIsValid(relfilenode))
relfilenode = relid;
if (!RelFileNumberIsValid(relfilenumber))
relfilenumber = relid;
}
/*
@ -368,7 +368,7 @@ heap_create(const char *relname,
tupDesc,
relid,
accessmtd,
relfilenode,
relfilenumber,
reltablespace,
shared_relation,
mapped_relation,
@ -385,11 +385,11 @@ heap_create(const char *relname,
if (create_storage)
{
if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
table_relation_set_new_filenode(rel, &rel->rd_node,
relpersistence,
relfrozenxid, relminmxid);
table_relation_set_new_filelocator(rel, &rel->rd_locator,
relpersistence,
relfrozenxid, relminmxid);
else if (RELKIND_HAS_STORAGE(rel->rd_rel->relkind))
RelationCreateStorage(rel->rd_node, relpersistence, true);
RelationCreateStorage(rel->rd_locator, relpersistence, true);
else
Assert(false);
}
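The table-AM callback reached through table_relation_set_new_filelocator() is renamed to match. Sketched from tableam.h as touched by this patch (treat the exact declaration as illustrative):

	void		(*relation_set_new_filelocator) (Relation rel,
												 const RelFileLocator *newrlocator,
												 char persistence,
												 TransactionId *freezeXid,
												 MultiXactId *minmulti);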
@ -1069,7 +1069,7 @@ AddNewRelationType(const char *typeName,
* relkind: relkind for new rel
* relpersistence: rel's persistence status (permanent, temp, or unlogged)
* shared_relation: true if it's to be a shared relation
* mapped_relation: true if the relation will use the relfilenode map
* mapped_relation: true if the relation will use the relfilenumber map
* oncommit: ON COMMIT marking (only relevant if it's a temp table)
* reloptions: reloptions in Datum form, or (Datum) 0 if none
* use_user_acl: true if should look for user-defined default permissions;
@ -1115,7 +1115,7 @@ heap_create_with_catalog(const char *relname,
Oid new_type_oid;
/* By default set to InvalidOid unless overridden by binary-upgrade */
Oid relfilenode = InvalidOid;
RelFileNumber relfilenumber = InvalidRelFileNumber;
TransactionId relfrozenxid;
MultiXactId relminmxid;
@ -1173,12 +1173,12 @@ heap_create_with_catalog(const char *relname,
/*
* Allocate an OID for the relation, unless we were told what to use.
*
* The OID will be the relfilenode as well, so make sure it doesn't
* The OID will be the relfilenumber as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(relid))
{
/* Use binary-upgrade override for pg_class.oid and relfilenode */
/* Use binary-upgrade override for pg_class.oid and relfilenumber */
if (IsBinaryUpgrade)
{
/*
@ -1196,13 +1196,13 @@ heap_create_with_catalog(const char *relname,
relid = binary_upgrade_next_toast_pg_class_oid;
binary_upgrade_next_toast_pg_class_oid = InvalidOid;
if (!OidIsValid(binary_upgrade_next_toast_pg_class_relfilenode))
if (!RelFileNumberIsValid(binary_upgrade_next_toast_pg_class_relfilenumber))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("toast relfilenode value not set when in binary upgrade mode")));
errmsg("toast relfilenumber value not set when in binary upgrade mode")));
relfilenode = binary_upgrade_next_toast_pg_class_relfilenode;
binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid;
relfilenumber = binary_upgrade_next_toast_pg_class_relfilenumber;
binary_upgrade_next_toast_pg_class_relfilenumber = InvalidRelFileNumber;
}
}
else
@ -1217,20 +1217,20 @@ heap_create_with_catalog(const char *relname,
if (RELKIND_HAS_STORAGE(relkind))
{
if (!OidIsValid(binary_upgrade_next_heap_pg_class_relfilenode))
if (!RelFileNumberIsValid(binary_upgrade_next_heap_pg_class_relfilenumber))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("relfilenode value not set when in binary upgrade mode")));
errmsg("relfilenumber value not set when in binary upgrade mode")));
relfilenode = binary_upgrade_next_heap_pg_class_relfilenode;
binary_upgrade_next_heap_pg_class_relfilenode = InvalidOid;
relfilenumber = binary_upgrade_next_heap_pg_class_relfilenumber;
binary_upgrade_next_heap_pg_class_relfilenumber = InvalidRelFileNumber;
}
}
}
if (!OidIsValid(relid))
relid = GetNewRelFileNode(reltablespace, pg_class_desc,
relpersistence);
relid = GetNewRelFileNumber(reltablespace, pg_class_desc,
relpersistence);
}
/*
@ -1273,7 +1273,7 @@ heap_create_with_catalog(const char *relname,
relnamespace,
reltablespace,
relid,
relfilenode,
relfilenumber,
accessmtd,
tupdesc,
relkind,

View File

@ -87,7 +87,8 @@
/* Potentially set by pg_upgrade_support functions */
Oid binary_upgrade_next_index_pg_class_oid = InvalidOid;
Oid binary_upgrade_next_index_pg_class_relfilenode = InvalidOid;
RelFileNumber binary_upgrade_next_index_pg_class_relfilenumber =
InvalidRelFileNumber;
/*
* Pointer-free representation of variables used when reindexing system
@ -662,8 +663,8 @@ UpdateIndexRelation(Oid indexoid,
* parent index; otherwise InvalidOid.
* parentConstraintId: if creating a constraint on a partition, the OID
* of the constraint in the parent; otherwise InvalidOid.
* relFileNode: normally, pass InvalidOid to get new storage. May be
* nonzero to attach an existing valid build.
* relFileNumber: normally, pass InvalidRelFileNumber to get new storage.
* Pass a valid relfilenumber to attach an existing valid build.
* indexInfo: same info executor uses to insert into the index
* indexColNames: column names to use for index (List of char *)
* accessMethodObjectId: OID of index AM to use
@ -703,7 +704,7 @@ index_create(Relation heapRelation,
Oid indexRelationId,
Oid parentIndexRelid,
Oid parentConstraintId,
Oid relFileNode,
RelFileNumber relFileNumber,
IndexInfo *indexInfo,
List *indexColNames,
Oid accessMethodObjectId,
@ -735,7 +736,7 @@ index_create(Relation heapRelation,
char relkind;
TransactionId relfrozenxid;
MultiXactId relminmxid;
bool create_storage = !OidIsValid(relFileNode);
bool create_storage = !RelFileNumberIsValid(relFileNumber);
/* constraint flags can only be set when a constraint is requested */
Assert((constr_flags == 0) ||
@ -751,7 +752,7 @@ index_create(Relation heapRelation,
/*
* The index will be in the same namespace as its parent table, and is
* shared across databases if and only if the parent is. Likewise, it
* will use the relfilenode map if and only if the parent does; and it
* will use the relfilenumber map if and only if the parent does; and it
* inherits the parent's relpersistence.
*/
namespaceId = RelationGetNamespace(heapRelation);
@ -902,12 +903,12 @@ index_create(Relation heapRelation,
/*
* Allocate an OID for the index, unless we were told what to use.
*
* The OID will be the relfilenode as well, so make sure it doesn't
* The OID will be the relfilenumber as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(indexRelationId))
{
/* Use binary-upgrade override for pg_class.oid and relfilenode */
/* Use binary-upgrade override for pg_class.oid and relfilenumber */
if (IsBinaryUpgrade)
{
if (!OidIsValid(binary_upgrade_next_index_pg_class_oid))
@ -918,14 +919,14 @@ index_create(Relation heapRelation,
indexRelationId = binary_upgrade_next_index_pg_class_oid;
binary_upgrade_next_index_pg_class_oid = InvalidOid;
/* Override the index relfilenode */
/* Override the index relfilenumber */
if ((relkind == RELKIND_INDEX) &&
(!OidIsValid(binary_upgrade_next_index_pg_class_relfilenode)))
(!RelFileNumberIsValid(binary_upgrade_next_index_pg_class_relfilenumber)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("index relfilenode value not set when in binary upgrade mode")));
relFileNode = binary_upgrade_next_index_pg_class_relfilenode;
binary_upgrade_next_index_pg_class_relfilenode = InvalidOid;
errmsg("index relfilenumber value not set when in binary upgrade mode")));
relFileNumber = binary_upgrade_next_index_pg_class_relfilenumber;
binary_upgrade_next_index_pg_class_relfilenumber = InvalidRelFileNumber;
/*
* Note that we want create_storage = true for binary upgrade. The
@ -937,7 +938,7 @@ index_create(Relation heapRelation,
else
{
indexRelationId =
GetNewRelFileNode(tableSpaceId, pg_class, relpersistence);
GetNewRelFileNumber(tableSpaceId, pg_class, relpersistence);
}
}
@ -950,7 +951,7 @@ index_create(Relation heapRelation,
namespaceId,
tableSpaceId,
indexRelationId,
relFileNode,
relFileNumber,
accessMethodObjectId,
indexTupDesc,
relkind,
@ -1408,7 +1409,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId,
InvalidOid, /* indexRelationId */
InvalidOid, /* parentIndexRelid */
InvalidOid, /* parentConstraintId */
InvalidOid, /* relFileNode */
InvalidRelFileNumber, /* relFileNumber */
newInfo,
indexColNames,
indexRelation->rd_rel->relam,
@ -3024,7 +3025,7 @@ index_build(Relation heapRelation,
* it -- but we must first check whether one already exists. If, for
* example, an unlogged relation is truncated in the transaction that
* created it, or truncated twice in a subsequent transaction, the
* relfilenode won't change, and nothing needs to be done here.
* relfilenumber won't change, and nothing needs to be done here.
*/
if (indexRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
!smgrexists(RelationGetSmgr(indexRelation), INIT_FORKNUM))
@ -3681,7 +3682,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
* Schedule unlinking of the old index storage at transaction commit.
*/
RelationDropStorage(iRel);
RelationAssumeNewRelfilenode(iRel);
RelationAssumeNewRelfilelocator(iRel);
/* Make sure the reltablespace change is visible */
CommandCounterIncrement();
@ -3711,7 +3712,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
SetReindexProcessing(heapId, indexId);
/* Create a new physical relation for the index */
RelationSetNewRelfilenode(iRel, persistence);
RelationSetNewRelfilenumber(iRel, persistence);
/* Initialize the index and rebuild */
/* Note: we do not need to re-establish pkey setting */
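The two renamed relcache helpers above are not interchangeable; a short contrast, as a sketch:

	/* Tablespace changed: index.c copied the storage itself, so the
	 * relcache just adopts the already-populated files. */
	RelationAssumeNewRelfilelocator(iRel);

	/* Ordinary rebuild: allocate a fresh relfilenumber, create empty
	 * storage for it, and leave the old storage scheduled for unlink. */
	RelationSetNewRelfilenumber(iRel, persistence);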

View File

@ -38,7 +38,7 @@
int wal_skip_threshold = 2048; /* in kilobytes */
/*
* We keep a list of all relations (represented as RelFileNode values)
* We keep a list of all relations (represented as RelFileLocator values)
* that have been created or deleted in the current transaction. When
* a relation is created, we create the physical file immediately, but
* remember it so that we can delete the file again if the current
@ -59,7 +59,7 @@ int wal_skip_threshold = 2048; /* in kilobytes */
typedef struct PendingRelDelete
{
RelFileNode relnode; /* relation that may need to be deleted */
RelFileLocator rlocator; /* relation that may need to be deleted */
BackendId backend; /* InvalidBackendId if not a temp rel */
bool atCommit; /* T=delete at commit; F=delete at abort */
int nestLevel; /* xact nesting level of request */
@ -68,7 +68,7 @@ typedef struct PendingRelDelete
typedef struct PendingRelSync
{
RelFileNode rnode;
RelFileLocator rlocator;
bool is_truncated; /* Has the file experienced truncation? */
} PendingRelSync;
@ -81,7 +81,7 @@ static HTAB *pendingSyncHash = NULL;
* Queue an at-commit fsync.
*/
static void
AddPendingSync(const RelFileNode *rnode)
AddPendingSync(const RelFileLocator *rlocator)
{
PendingRelSync *pending;
bool found;
@ -91,14 +91,14 @@ AddPendingSync(const RelFileNode *rnode)
{
HASHCTL ctl;
ctl.keysize = sizeof(RelFileNode);
ctl.keysize = sizeof(RelFileLocator);
ctl.entrysize = sizeof(PendingRelSync);
ctl.hcxt = TopTransactionContext;
pendingSyncHash = hash_create("pending sync hash", 16, &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}
pending = hash_search(pendingSyncHash, rnode, HASH_ENTER, &found);
pending = hash_search(pendingSyncHash, rlocator, HASH_ENTER, &found);
Assert(!found);
pending->is_truncated = false;
}
@ -117,7 +117,7 @@ AddPendingSync(const RelFileNode *rnode)
* pass register_delete = false.
*/
SMgrRelation
RelationCreateStorage(RelFileNode rnode, char relpersistence,
RelationCreateStorage(RelFileLocator rlocator, char relpersistence,
bool register_delete)
{
SMgrRelation srel;
@ -145,11 +145,11 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence,
return NULL; /* placate compiler */
}
srel = smgropen(rnode, backend);
srel = smgropen(rlocator, backend);
smgrcreate(srel, MAIN_FORKNUM, false);
if (needs_wal)
log_smgrcreate(&srel->smgr_rnode.node, MAIN_FORKNUM);
log_smgrcreate(&srel->smgr_rlocator.locator, MAIN_FORKNUM);
/*
* Add the relation to the list of stuff to delete at abort, if we are
@ -161,7 +161,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence,
pending = (PendingRelDelete *)
MemoryContextAlloc(TopMemoryContext, sizeof(PendingRelDelete));
pending->relnode = rnode;
pending->rlocator = rlocator;
pending->backend = backend;
pending->atCommit = false; /* delete if abort */
pending->nestLevel = GetCurrentTransactionNestLevel();
@ -172,7 +172,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence,
if (relpersistence == RELPERSISTENCE_PERMANENT && !XLogIsNeeded())
{
Assert(backend == InvalidBackendId);
AddPendingSync(&rnode);
AddPendingSync(&rlocator);
}
return srel;
@ -182,14 +182,14 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence,
* Perform XLogInsert of an XLOG_SMGR_CREATE record to WAL.
*/
void
log_smgrcreate(const RelFileNode *rnode, ForkNumber forkNum)
log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
{
xl_smgr_create xlrec;
/*
* Make an XLOG entry reporting the file creation.
*/
xlrec.rnode = *rnode;
xlrec.rlocator = *rlocator;
xlrec.forkNum = forkNum;
XLogBeginInsert();
@ -209,7 +209,7 @@ RelationDropStorage(Relation rel)
/* Add the relation to the list of stuff to delete at commit */
pending = (PendingRelDelete *)
MemoryContextAlloc(TopMemoryContext, sizeof(PendingRelDelete));
pending->relnode = rel->rd_node;
pending->rlocator = rel->rd_locator;
pending->backend = rel->rd_backend;
pending->atCommit = true; /* delete if commit */
pending->nestLevel = GetCurrentTransactionNestLevel();
@ -247,7 +247,7 @@ RelationDropStorage(Relation rel)
* No-op if the relation is not among those scheduled for deletion.
*/
void
RelationPreserveStorage(RelFileNode rnode, bool atCommit)
RelationPreserveStorage(RelFileLocator rlocator, bool atCommit)
{
PendingRelDelete *pending;
PendingRelDelete *prev;
@ -257,7 +257,7 @@ RelationPreserveStorage(RelFileNode rnode, bool atCommit)
for (pending = pendingDeletes; pending != NULL; pending = next)
{
next = pending->next;
if (RelFileNodeEquals(rnode, pending->relnode)
if (RelFileLocatorEquals(rlocator, pending->rlocator)
&& pending->atCommit == atCommit)
{
/* unlink and delete list entry */
@ -369,7 +369,7 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
xl_smgr_truncate xlrec;
xlrec.blkno = nblocks;
xlrec.rnode = rel->rd_node;
xlrec.rlocator = rel->rd_locator;
xlrec.flags = SMGR_TRUNCATE_ALL;
XLogBeginInsert();
@ -428,7 +428,7 @@ RelationPreTruncate(Relation rel)
return;
pending = hash_search(pendingSyncHash,
&(RelationGetSmgr(rel)->smgr_rnode.node),
&(RelationGetSmgr(rel)->smgr_rlocator.locator),
HASH_FIND, NULL);
if (pending)
pending->is_truncated = true;
@ -472,7 +472,7 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
* We need to log the copied data in WAL iff WAL archiving/streaming is
* enabled AND it's a permanent relation. This gives the same answer as
* "RelationNeedsWAL(rel) || copying_initfork", because we know the
* current operation created a new relfilenode.
* current operation created new relation storage.
*/
use_wal = XLogIsNeeded() &&
(relpersistence == RELPERSISTENCE_PERMANENT || copying_initfork);
@ -496,8 +496,8 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
* (errcontext callbacks shouldn't be risking any such thing, but
* people have been known to forget that rule.)
*/
char *relpath = relpathbackend(src->smgr_rnode.node,
src->smgr_rnode.backend,
char *relpath = relpathbackend(src->smgr_rlocator.locator,
src->smgr_rlocator.backend,
forkNum);
ereport(ERROR,
@ -512,7 +512,7 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
* space.
*/
if (use_wal)
log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false);
log_newpage(&dst->smgr_rlocator.locator, forkNum, blkno, page, false);
PageSetChecksumInplace(page, blkno);
@ -538,19 +538,19 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
}
/*
* RelFileNodeSkippingWAL
* Check if a BM_PERMANENT relfilenode is using WAL.
* RelFileLocatorSkippingWAL
* Check whether a BM_PERMANENT relfilelocator is skipping WAL
* (returns true if so).
*
* Changes of certain relfilenodes must not write WAL; see "Skipping WAL for
* New RelFileNode" in src/backend/access/transam/README. Though it is known
* from Relation efficiently, this function is intended for the code paths not
* having access to Relation.
* Changes to certain relations must not write WAL; see "Skipping WAL for
* New RelFileLocator" in src/backend/access/transam/README. Though it is
* known from Relation efficiently, this function is intended for the code
* paths not having access to Relation.
*/
bool
RelFileNodeSkippingWAL(RelFileNode rnode)
RelFileLocatorSkippingWAL(RelFileLocator rlocator)
{
if (!pendingSyncHash ||
hash_search(pendingSyncHash, &rnode, HASH_FIND, NULL) == NULL)
hash_search(pendingSyncHash, &rlocator, HASH_FIND, NULL) == NULL)
return false;
return true;
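A hedged usage sketch for such locator-only code paths (hypothetical caller; rlocator, blkno and page are assumed):

	/* WAL-log the page unless this relation's new storage will instead
	 * be fsync'ed at commit (the wal_level=minimal optimization) */
	if (!RelFileLocatorSkippingWAL(rlocator))
		(void) log_newpage(&rlocator, MAIN_FORKNUM, blkno, page, true);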
@ -566,7 +566,7 @@ EstimatePendingSyncsSpace(void)
long entries;
entries = pendingSyncHash ? hash_get_num_entries(pendingSyncHash) : 0;
return mul_size(1 + entries, sizeof(RelFileNode));
return mul_size(1 + entries, sizeof(RelFileLocator));
}
/*
@ -581,57 +581,58 @@ SerializePendingSyncs(Size maxSize, char *startAddress)
HASH_SEQ_STATUS scan;
PendingRelSync *sync;
PendingRelDelete *delete;
RelFileNode *src;
RelFileNode *dest = (RelFileNode *) startAddress;
RelFileLocator *src;
RelFileLocator *dest = (RelFileLocator *) startAddress;
if (!pendingSyncHash)
goto terminate;
/* Create temporary hash to collect active relfilenodes */
ctl.keysize = sizeof(RelFileNode);
ctl.entrysize = sizeof(RelFileNode);
/* Create temporary hash to collect active relfilelocators */
ctl.keysize = sizeof(RelFileLocator);
ctl.entrysize = sizeof(RelFileLocator);
ctl.hcxt = CurrentMemoryContext;
tmphash = hash_create("tmp relfilenodes",
tmphash = hash_create("tmp relfilelocators",
hash_get_num_entries(pendingSyncHash), &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* collect all rnodes from pending syncs */
/* collect all rlocators from pending syncs */
hash_seq_init(&scan, pendingSyncHash);
while ((sync = (PendingRelSync *) hash_seq_search(&scan)))
(void) hash_search(tmphash, &sync->rnode, HASH_ENTER, NULL);
(void) hash_search(tmphash, &sync->rlocator, HASH_ENTER, NULL);
/* remove deleted rlocators */
for (delete = pendingDeletes; delete != NULL; delete = delete->next)
if (delete->atCommit)
(void) hash_search(tmphash, (void *) &delete->relnode,
(void) hash_search(tmphash, (void *) &delete->rlocator,
HASH_REMOVE, NULL);
hash_seq_init(&scan, tmphash);
while ((src = (RelFileNode *) hash_seq_search(&scan)))
while ((src = (RelFileLocator *) hash_seq_search(&scan)))
*dest++ = *src;
hash_destroy(tmphash);
terminate:
MemSet(dest, 0, sizeof(RelFileNode));
MemSet(dest, 0, sizeof(RelFileLocator));
}
/*
* RestorePendingSyncs
* Restore syncs within a parallel worker.
*
* RelationNeedsWAL() and RelFileNodeSkippingWAL() must offer the correct
* RelationNeedsWAL() and RelFileLocatorSkippingWAL() must offer the correct
* answer to parallel workers. Only smgrDoPendingSyncs() reads the
* is_truncated field, at end of transaction. Hence, don't restore it.
*/
void
RestorePendingSyncs(char *startAddress)
{
RelFileNode *rnode;
RelFileLocator *rlocator;
Assert(pendingSyncHash == NULL);
for (rnode = (RelFileNode *) startAddress; rnode->relNode != 0; rnode++)
AddPendingSync(rnode);
for (rlocator = (RelFileLocator *) startAddress; rlocator->relNumber != 0;
rlocator++)
AddPendingSync(rlocator);
}
/*
@ -677,7 +678,7 @@ smgrDoPendingDeletes(bool isCommit)
{
SMgrRelation srel;
srel = smgropen(pending->relnode, pending->backend);
srel = smgropen(pending->rlocator, pending->backend);
/* allocate the initial array, or extend it, if needed */
if (maxrels == 0)
@ -747,7 +748,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
/* Skip syncing relations that smgrDoPendingDeletes() will delete. */
for (pending = pendingDeletes; pending != NULL; pending = pending->next)
if (pending->atCommit)
(void) hash_search(pendingSyncHash, (void *) &pending->relnode,
(void) hash_search(pendingSyncHash, (void *) &pending->rlocator,
HASH_REMOVE, NULL);
hash_seq_init(&scan, pendingSyncHash);
@ -758,7 +759,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
BlockNumber total_blocks = 0;
SMgrRelation srel;
srel = smgropen(pendingsync->rnode, InvalidBackendId);
srel = smgropen(pendingsync->rlocator, InvalidBackendId);
/*
* We emit newpage WAL records for smaller relations.
@ -832,7 +833,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
* page including any unused space. ReadBufferExtended()
* counts some pgstat events; unfortunately, we discard them.
*/
rel = CreateFakeRelcacheEntry(srel->smgr_rnode.node);
rel = CreateFakeRelcacheEntry(srel->smgr_rlocator.locator);
log_newpage_range(rel, fork, 0, n, false);
FreeFakeRelcacheEntry(rel);
}
@ -852,7 +853,7 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
* smgrGetPendingDeletes() -- Get a list of non-temp relations to be deleted.
*
* The return value is the number of relations scheduled for termination.
* *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
* *ptr is set to point to a freshly-palloc'd array of RelFileLocators.
* If there are no relations to be deleted, *ptr is set to NULL.
*
* Only non-temporary relations are included in the returned list. This is OK
@ -866,11 +867,11 @@ smgrDoPendingSyncs(bool isCommit, bool isParallelWorker)
* by upper-level transactions.
*/
int
smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr)
smgrGetPendingDeletes(bool forCommit, RelFileLocator **ptr)
{
int nestLevel = GetCurrentTransactionNestLevel();
int nrels;
RelFileNode *rptr;
RelFileLocator *rptr;
PendingRelDelete *pending;
nrels = 0;
@ -885,14 +886,14 @@ smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr)
*ptr = NULL;
return 0;
}
rptr = (RelFileNode *) palloc(nrels * sizeof(RelFileNode));
rptr = (RelFileLocator *) palloc(nrels * sizeof(RelFileLocator));
*ptr = rptr;
for (pending = pendingDeletes; pending != NULL; pending = pending->next)
{
if (pending->nestLevel >= nestLevel && pending->atCommit == forCommit
&& pending->backend == InvalidBackendId)
{
*rptr = pending->relnode;
*rptr = pending->rlocator;
rptr++;
}
}
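A typical caller treats the returned array as transient commit-record payload; a minimal sketch (compare RecordTransactionCommit in xact.c, which is not part of this diff):

RelFileLocator *rels;
int             nrels;

nrels = smgrGetPendingDeletes(true, &rels);  /* forCommit = true */
if (nrels > 0)
{
    /* attach the locators to the commit WAL record, then release */
    pfree(rels);
}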
@ -967,7 +968,7 @@ smgr_redo(XLogReaderState *record)
xl_smgr_create *xlrec = (xl_smgr_create *) XLogRecGetData(record);
SMgrRelation reln;
reln = smgropen(xlrec->rnode, InvalidBackendId);
reln = smgropen(xlrec->rlocator, InvalidBackendId);
smgrcreate(reln, xlrec->forkNum, true);
}
else if (info == XLOG_SMGR_TRUNCATE)
@ -980,7 +981,7 @@ smgr_redo(XLogReaderState *record)
int nforks = 0;
bool need_fsm_vacuum = false;
reln = smgropen(xlrec->rnode, InvalidBackendId);
reln = smgropen(xlrec->rlocator, InvalidBackendId);
/*
* Forcibly create relation if it doesn't exist (which suggests that
@ -1015,11 +1016,11 @@ smgr_redo(XLogReaderState *record)
nforks++;
/* Also tell xlogutils.c about it */
XLogTruncateRelation(xlrec->rnode, MAIN_FORKNUM, xlrec->blkno);
XLogTruncateRelation(xlrec->rlocator, MAIN_FORKNUM, xlrec->blkno);
}
/* Prepare for truncation of FSM and VM too */
rel = CreateFakeRelcacheEntry(xlrec->rnode);
rel = CreateFakeRelcacheEntry(xlrec->rlocator);
if ((xlrec->flags & SMGR_TRUNCATE_FSM) != 0 &&
smgrexists(reln, FSM_FORKNUM))

View File

@ -293,7 +293,7 @@ cluster_multiple_rels(List *rtcs, ClusterParams *params)
* cluster_rel
*
* This clusters the table by creating a new, clustered table and
* swapping the relfilenodes of the new table and the old table, so
* swapping the relfilenumbers of the new table and the old table, so
* the OID of the original table is preserved. Thus we do not lose
* GRANT, inheritance nor references to this table (this was a bug
* in releases through 7.3).
@ -1025,8 +1025,8 @@ copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
/*
* Swap the physical files of two given relations.
*
* We swap the physical identity (reltablespace, relfilenode) while keeping the
* same logical identities of the two relations. relpersistence is also
* We swap the physical identity (reltablespace, relfilenumber) while keeping
* the same logical identities of the two relations. relpersistence is also
* swapped, which is critical since it determines where buffers live for each
* relation.
*
@ -1061,9 +1061,9 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
reltup2;
Form_pg_class relform1,
relform2;
Oid relfilenode1,
relfilenode2;
Oid swaptemp;
RelFileNumber relfilenumber1,
relfilenumber2;
RelFileNumber swaptemp;
char swptmpchr;
/* We need writable copies of both pg_class tuples. */
@ -1079,13 +1079,14 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
elog(ERROR, "cache lookup failed for relation %u", r2);
relform2 = (Form_pg_class) GETSTRUCT(reltup2);
relfilenode1 = relform1->relfilenode;
relfilenode2 = relform2->relfilenode;
relfilenumber1 = relform1->relfilenode;
relfilenumber2 = relform2->relfilenode;
if (OidIsValid(relfilenode1) && OidIsValid(relfilenode2))
if (RelFileNumberIsValid(relfilenumber1) &&
RelFileNumberIsValid(relfilenumber2))
{
/*
* Normal non-mapped relations: swap relfilenodes, reltablespaces,
* Normal non-mapped relations: swap relfilenumbers, reltablespaces,
* relpersistence
*/
Assert(!target_is_pg_class);
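Condensed, the non-mapped branch is a plain three-step exchange of the pg_class columns; the function repeats the same pattern for reltablespace and relpersistence:

swaptemp = relform1->relfilenode;
relform1->relfilenode = relform2->relfilenode;
relform2->relfilenode = swaptemp;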
@ -1120,7 +1121,8 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
* Mapped-relation case. Here we have to swap the relation mappings
* instead of modifying the pg_class columns. Both must be mapped.
*/
if (OidIsValid(relfilenode1) || OidIsValid(relfilenode2))
if (RelFileNumberIsValid(relfilenumber1) ||
RelFileNumberIsValid(relfilenumber2))
elog(ERROR, "cannot swap mapped relation \"%s\" with non-mapped relation",
NameStr(relform1->relname));
@ -1148,12 +1150,12 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
/*
* Fetch the mappings --- shouldn't fail, but be paranoid
*/
relfilenode1 = RelationMapOidToFilenode(r1, relform1->relisshared);
if (!OidIsValid(relfilenode1))
relfilenumber1 = RelationMapOidToFilenumber(r1, relform1->relisshared);
if (!RelFileNumberIsValid(relfilenumber1))
elog(ERROR, "could not find relation mapping for relation \"%s\", OID %u",
NameStr(relform1->relname), r1);
relfilenode2 = RelationMapOidToFilenode(r2, relform2->relisshared);
if (!OidIsValid(relfilenode2))
relfilenumber2 = RelationMapOidToFilenumber(r2, relform2->relisshared);
if (!RelFileNumberIsValid(relfilenumber2))
elog(ERROR, "could not find relation mapping for relation \"%s\", OID %u",
NameStr(relform2->relname), r2);
@ -1161,15 +1163,15 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
* Send replacement mappings to relmapper. Note these won't actually
* take effect until CommandCounterIncrement.
*/
RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
RelationMapUpdateMap(r2, relfilenode1, relform2->relisshared, false);
RelationMapUpdateMap(r1, relfilenumber2, relform1->relisshared, false);
RelationMapUpdateMap(r2, relfilenumber1, relform2->relisshared, false);
/* Pass OIDs of mapped r2 tables back to caller */
*mapped_tables++ = r2;
}
/*
* Recognize that rel1's relfilenode (swapped from rel2) is new in this
* Recognize that rel1's relfilenumber (swapped from rel2) is new in this
* subtransaction. The rel2 storage (swapped from rel1) may or may not be
* new.
*/
@ -1180,9 +1182,9 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class,
rel1 = relation_open(r1, NoLock);
rel2 = relation_open(r2, NoLock);
rel2->rd_createSubid = rel1->rd_createSubid;
rel2->rd_newRelfilenodeSubid = rel1->rd_newRelfilenodeSubid;
rel2->rd_firstRelfilenodeSubid = rel1->rd_firstRelfilenodeSubid;
RelationAssumeNewRelfilenode(rel1);
rel2->rd_newRelfilelocatorSubid = rel1->rd_newRelfilelocatorSubid;
rel2->rd_firstRelfilelocatorSubid = rel1->rd_firstRelfilelocatorSubid;
RelationAssumeNewRelfilelocator(rel1);
relation_close(rel1, NoLock);
relation_close(rel2, NoLock);
}
@ -1523,7 +1525,7 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
table_close(relRelation, RowExclusiveLock);
}
/* Destroy new heap with old filenode */
/* Destroy new heap with old filenumber */
object.classId = RelationRelationId;
object.objectId = OIDNewHeap;
object.objectSubId = 0;

View File

@ -593,12 +593,12 @@ CopyFrom(CopyFromState cstate)
*/
if (RELKIND_HAS_STORAGE(cstate->rel->rd_rel->relkind) &&
(cstate->rel->rd_createSubid != InvalidSubTransactionId ||
cstate->rel->rd_firstRelfilenodeSubid != InvalidSubTransactionId))
cstate->rel->rd_firstRelfilelocatorSubid != InvalidSubTransactionId))
ti_options |= TABLE_INSERT_SKIP_FSM;
/*
* Optimize if new relfilenode was created in this subxact or one of its
* committed children and we won't see those rows later as part of an
* Optimize if new relation storage was created in this subxact or one of
* its committed children and we won't see those rows later as part of an
* earlier scan or command. The subxact test ensures that if this subxact
* aborts then the frozen rows won't be visible after xact cleanup. Note
* that the stronger test of exactly which subtransaction created it is
@ -640,7 +640,7 @@ CopyFrom(CopyFromState cstate)
errmsg("cannot perform COPY FREEZE because of prior transaction activity")));
if (cstate->rel->rd_createSubid != GetCurrentSubTransactionId() &&
cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
cstate->rel->rd_newRelfilelocatorSubid != GetCurrentSubTransactionId())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction")));

View File

@ -101,7 +101,7 @@ typedef struct
*/
typedef struct CreateDBRelInfo
{
RelFileNode rnode; /* physical relation identifier */
RelFileLocator rlocator; /* physical relation identifier */
Oid reloid; /* relation oid */
bool permanent; /* relation is permanent or unlogged */
} CreateDBRelInfo;
@ -127,7 +127,7 @@ static void CreateDatabaseUsingWalLog(Oid src_dboid, Oid dboid, Oid src_tsid,
static List *ScanSourceDatabasePgClass(Oid srctbid, Oid srcdbid, char *srcpath);
static List *ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid,
Oid dbid, char *srcpath,
List *rnodelist, Snapshot snapshot);
List *rlocatorlist, Snapshot snapshot);
static CreateDBRelInfo *ScanSourceDatabasePgClassTuple(HeapTupleData *tuple,
Oid tbid, Oid dbid,
char *srcpath);
@ -147,12 +147,12 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
{
char *srcpath;
char *dstpath;
List *rnodelist = NULL;
List *rlocatorlist = NULL;
ListCell *cell;
LockRelId srcrelid;
LockRelId dstrelid;
RelFileNode srcrnode;
RelFileNode dstrnode;
RelFileLocator srcrlocator;
RelFileLocator dstrlocator;
CreateDBRelInfo *relinfo;
/* Get source and destination database paths. */
@ -165,9 +165,9 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
/* Copy relmap file from source database to the destination database. */
RelationMapCopy(dst_dboid, dst_tsid, srcpath, dstpath);
/* Get list of relfilenodes to copy from the source database. */
rnodelist = ScanSourceDatabasePgClass(src_tsid, src_dboid, srcpath);
Assert(rnodelist != NIL);
/* Get list of relfilelocators to copy from the source database. */
rlocatorlist = ScanSourceDatabasePgClass(src_tsid, src_dboid, srcpath);
Assert(rlocatorlist != NIL);
/*
* Database IDs will be the same for all relations so set them before
@ -176,11 +176,11 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
srcrelid.dbId = src_dboid;
dstrelid.dbId = dst_dboid;
/* Loop over our list of relfilenodes and copy each one. */
foreach(cell, rnodelist)
/* Loop over our list of relfilelocators and copy each one. */
foreach(cell, rlocatorlist)
{
relinfo = lfirst(cell);
srcrnode = relinfo->rnode;
srcrlocator = relinfo->rlocator;
/*
* If the relation is from the source db's default tablespace then we
@ -188,13 +188,13 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
* Otherwise, we need to create in the same tablespace as it is in the
* source database.
*/
if (srcrnode.spcNode == src_tsid)
dstrnode.spcNode = dst_tsid;
if (srcrlocator.spcOid == src_tsid)
dstrlocator.spcOid = dst_tsid;
else
dstrnode.spcNode = srcrnode.spcNode;
dstrlocator.spcOid = srcrlocator.spcOid;
dstrnode.dbNode = dst_dboid;
dstrnode.relNode = srcrnode.relNode;
dstrlocator.dbOid = dst_dboid;
dstrlocator.relNumber = srcrlocator.relNumber;
/*
* Acquire locks on source and target relations before copying.
@ -210,7 +210,7 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
LockRelationId(&dstrelid, AccessShareLock);
/* Copy relation storage from source to the destination. */
CreateAndCopyRelationData(srcrnode, dstrnode, relinfo->permanent);
CreateAndCopyRelationData(srcrlocator, dstrlocator, relinfo->permanent);
/* Release the relation locks. */
UnlockRelationId(&srcrelid, AccessShareLock);
@ -219,7 +219,7 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
pfree(srcpath);
pfree(dstpath);
list_free_deep(rnodelist);
list_free_deep(rlocatorlist);
}
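Assembled from the new lines only, the locator mapping inside the copy loop reads:

srcrlocator = relinfo->rlocator;
dstrlocator.spcOid = (srcrlocator.spcOid == src_tsid) ?
    dst_tsid : srcrlocator.spcOid;
dstrlocator.dbOid = dst_dboid;
dstrlocator.relNumber = srcrlocator.relNumber;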
/*
@ -246,31 +246,31 @@ CreateDatabaseUsingWalLog(Oid src_dboid, Oid dst_dboid,
static List *
ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
{
RelFileNode rnode;
RelFileLocator rlocator;
BlockNumber nblocks;
BlockNumber blkno;
Buffer buf;
Oid relfilenode;
Oid relfilenumber;
Page page;
List *rnodelist = NIL;
List *rlocatorlist = NIL;
LockRelId relid;
Relation rel;
Snapshot snapshot;
BufferAccessStrategy bstrategy;
/* Get pg_class relfilenode. */
relfilenode = RelationMapOidToFilenodeForDatabase(srcpath,
RelationRelationId);
/* Get pg_class relfilenumber. */
relfilenumber = RelationMapOidToFilenumberForDatabase(srcpath,
RelationRelationId);
/* Don't read data into shared_buffers without holding a relation lock. */
relid.dbId = dbid;
relid.relId = RelationRelationId;
LockRelationId(&relid, AccessShareLock);
/* Prepare a RelFileNode for the pg_class relation. */
rnode.spcNode = tbid;
rnode.dbNode = dbid;
rnode.relNode = relfilenode;
/* Prepare a RelFileLocator for the pg_class relation. */
rlocator.spcOid = tbid;
rlocator.dbOid = dbid;
rlocator.relNumber = relfilenumber;
/*
* We can't use a real relcache entry for a relation in some other
@ -279,7 +279,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
* used the smgr layer directly, we would have to worry about
* invalidations.
*/
rel = CreateFakeRelcacheEntry(rnode);
rel = CreateFakeRelcacheEntry(rlocator);
nblocks = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM);
FreeFakeRelcacheEntry(rel);
@ -299,7 +299,7 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
{
CHECK_FOR_INTERRUPTS();
buf = ReadBufferWithoutRelcache(rnode, MAIN_FORKNUM, blkno,
buf = ReadBufferWithoutRelcache(rlocator, MAIN_FORKNUM, blkno,
RBM_NORMAL, bstrategy, false);
LockBuffer(buf, BUFFER_LOCK_SHARE);
@ -310,10 +310,10 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
continue;
}
/* Append relevant pg_class tuples for current page to rnodelist. */
rnodelist = ScanSourceDatabasePgClassPage(page, buf, tbid, dbid,
srcpath, rnodelist,
snapshot);
/* Append relevant pg_class tuples for current page to rlocatorlist. */
rlocatorlist = ScanSourceDatabasePgClassPage(page, buf, tbid, dbid,
srcpath, rlocatorlist,
snapshot);
UnlockReleaseBuffer(buf);
}
@ -321,16 +321,16 @@ ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
/* Release relation lock. */
UnlockRelationId(&relid, AccessShareLock);
return rnodelist;
return rlocatorlist;
}
/*
* Scan one page of the source database's pg_class relation and add relevant
* entries to rnodelist. The return value is the updated list.
* entries to rlocatorlist. The return value is the updated list.
*/
static List *
ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
char *srcpath, List *rnodelist,
char *srcpath, List *rlocatorlist,
Snapshot snapshot)
{
BlockNumber blkno = BufferGetBlockNumber(buf);
@ -376,11 +376,11 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
relinfo = ScanSourceDatabasePgClassTuple(&tuple, tbid, dbid,
srcpath);
if (relinfo != NULL)
rnodelist = lappend(rnodelist, relinfo);
rlocatorlist = lappend(rlocatorlist, relinfo);
}
}
return rnodelist;
return rlocatorlist;
}
/*
@ -397,7 +397,7 @@ ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
{
CreateDBRelInfo *relinfo;
Form_pg_class classForm;
Oid relfilenode = InvalidOid;
Oid relfilenumber = InvalidRelFileNumber;
classForm = (Form_pg_class) GETSTRUCT(tuple);
@ -418,29 +418,29 @@ ScanSourceDatabasePgClassTuple(HeapTupleData *tuple, Oid tbid, Oid dbid,
return NULL;
/*
* If relfilenode is valid then directly use it. Otherwise, consult the
* If relfilenumber is valid then directly use it. Otherwise, consult the
* relmap.
*/
if (OidIsValid(classForm->relfilenode))
relfilenode = classForm->relfilenode;
if (RelFileNumberIsValid(classForm->relfilenode))
relfilenumber = classForm->relfilenode;
else
relfilenode = RelationMapOidToFilenodeForDatabase(srcpath,
classForm->oid);
relfilenumber = RelationMapOidToFilenumberForDatabase(srcpath,
classForm->oid);
/* We must have a valid relfilenode oid. */
if (!OidIsValid(relfilenode))
elog(ERROR, "relation with OID %u does not have a valid relfilenode",
/* We must have a valid relfilenumber. */
if (!RelFileNumberIsValid(relfilenumber))
elog(ERROR, "relation with OID %u does not have a valid relfilenumber",
classForm->oid);
/* Prepare a rel info element and add it to the list. */
relinfo = (CreateDBRelInfo *) palloc(sizeof(CreateDBRelInfo));
if (OidIsValid(classForm->reltablespace))
relinfo->rnode.spcNode = classForm->reltablespace;
relinfo->rlocator.spcOid = classForm->reltablespace;
else
relinfo->rnode.spcNode = tbid;
relinfo->rlocator.spcOid = tbid;
relinfo->rnode.dbNode = dbid;
relinfo->rnode.relNode = relfilenode;
relinfo->rlocator.dbOid = dbid;
relinfo->rlocator.relNumber = relfilenumber;
relinfo->reloid = classForm->oid;
/* Temporary relations were rejected above. */
@ -2867,8 +2867,8 @@ remove_dbtablespaces(Oid db_id)
* try to remove that already-existing subdirectory during the cleanup in
* remove_dbtablespaces. Nuking existing files seems like a bad idea, so
* instead we make this extra check before settling on the OID of the new
* database. This exactly parallels what GetNewRelFileNode() does for table
* relfilenode values.
* database. This exactly parallels what GetNewRelFileNumber() does for table
* relfilenumber values.
*/
static bool
check_db_file_conflict(Oid db_id)

View File

@ -1109,10 +1109,10 @@ DefineIndex(Oid relationId,
}
/*
* A valid stmt->oldNode implies that we already have a built form of the
* index. The caller should also decline any index build.
* A valid stmt->oldNumber implies that we already have a built form of
* the index. The caller should also decline any index build.
*/
Assert(!OidIsValid(stmt->oldNode) || (skip_build && !concurrent));
Assert(!RelFileNumberIsValid(stmt->oldNumber) || (skip_build && !concurrent));
/*
* Make the catalog entries for the index, including constraints. This
@ -1154,7 +1154,7 @@ DefineIndex(Oid relationId,
indexRelationId =
index_create(rel, indexRelationName, indexRelationId, parentIndexId,
parentConstraintId,
stmt->oldNode, indexInfo, indexColNames,
stmt->oldNumber, indexInfo, indexColNames,
accessMethodId, tablespaceId,
collationObjectId, classObjectId,
coloptions, reloptions,
@ -1361,15 +1361,15 @@ DefineIndex(Oid relationId,
* We can't use the same index name for the child index,
* so clear idxname to let the recursive invocation choose
* a new name. Likewise, the existing target relation
* field is wrong, and if indexOid or oldNode are set,
* field is wrong, and if indexOid or oldNumber are set,
* they mustn't be applied to the child either.
*/
childStmt->idxname = NULL;
childStmt->relation = NULL;
childStmt->indexOid = InvalidOid;
childStmt->oldNode = InvalidOid;
childStmt->oldNumber = InvalidRelFileNumber;
childStmt->oldCreateSubid = InvalidSubTransactionId;
childStmt->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
childStmt->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
/*
* Adjust any Vars (both in expressions and in the index's
@ -3015,7 +3015,7 @@ ReindexMultipleTables(const char *objectName, ReindexObjectType objectKind,
* particular this eliminates all shared catalogs.).
*/
if (RELKIND_HAS_STORAGE(classtuple->relkind) &&
!OidIsValid(classtuple->relfilenode))
!RelFileNumberIsValid(classtuple->relfilenode))
skip_rel = true;
/*

View File

@ -118,7 +118,7 @@ SetMatViewPopulatedState(Relation relation, bool newstate)
* ExecRefreshMatView -- execute a REFRESH MATERIALIZED VIEW command
*
* This refreshes the materialized view by creating a new table and swapping
* the relfilenodes of the new table and the old materialized view, so the OID
* the relfilenumbers of the new table and the old materialized view, so the OID
* of the original materialized view is preserved. Thus we do not lose GRANT
* nor references to this materialized view.
*

View File

@ -75,7 +75,7 @@ typedef struct sequence_magic
typedef struct SeqTableData
{
Oid relid; /* pg_class OID of this sequence (hash key) */
Oid filenode; /* last seen relfilenode of this sequence */
RelFileNumber filenumber; /* last seen relfilenumber of this sequence */
LocalTransactionId lxid; /* xact in which we last did a seq op */
bool last_valid; /* do we have a valid "last" value? */
int64 last; /* value last returned by nextval */
@ -255,7 +255,7 @@ DefineSequence(ParseState *pstate, CreateSeqStmt *seq)
*
* The change is made transactionally, so that on failure of the current
* transaction, the sequence will be restored to its previous state.
* We do that by creating a whole new relfilenode for the sequence; so this
* We do that by creating a whole new relfilenumber for the sequence; so this
* works much like the rewriting forms of ALTER TABLE.
*
* Caller is assumed to have acquired AccessExclusiveLock on the sequence,
@ -310,7 +310,7 @@ ResetSequence(Oid seq_relid)
/*
* Create a new storage file for the sequence.
*/
RelationSetNewRelfilenode(seq_rel, seq_rel->rd_rel->relpersistence);
RelationSetNewRelfilenumber(seq_rel, seq_rel->rd_rel->relpersistence);
/*
* Ensure sequence's relfrozenxid is at 0, since it won't contain any
@ -347,9 +347,9 @@ fill_seq_with_data(Relation rel, HeapTuple tuple)
{
SMgrRelation srel;
srel = smgropen(rel->rd_node, InvalidBackendId);
srel = smgropen(rel->rd_locator, InvalidBackendId);
smgrcreate(srel, INIT_FORKNUM, false);
log_smgrcreate(&rel->rd_node, INIT_FORKNUM);
log_smgrcreate(&rel->rd_locator, INIT_FORKNUM);
fill_seq_fork_with_data(rel, tuple, INIT_FORKNUM);
FlushRelationBuffers(rel);
smgrclose(srel);
@ -418,7 +418,7 @@ fill_seq_fork_with_data(Relation rel, HeapTuple tuple, ForkNumber forkNum)
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
xlrec.node = rel->rd_node;
xlrec.locator = rel->rd_locator;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) tuple->t_data, tuple->t_len);
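For reference, the WAL record type whose field is renamed across these hunks is tiny (commands/sequence.h, abridged):

typedef struct xl_seq_rec
{
    RelFileLocator locator;    /* was: RelFileNode node */
    /* SEQUENCE TUPLE DATA FOLLOWS AT THE END */
} xl_seq_rec;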
@ -509,7 +509,7 @@ AlterSequence(ParseState *pstate, AlterSeqStmt *stmt)
* Create a new storage file for the sequence, making the state
* changes transactional.
*/
RelationSetNewRelfilenode(seqrel, seqrel->rd_rel->relpersistence);
RelationSetNewRelfilenumber(seqrel, seqrel->rd_rel->relpersistence);
/*
* Ensure sequence's relfrozenxid is at 0, since it won't contain any
@ -557,7 +557,7 @@ SequenceChangePersistence(Oid relid, char newrelpersistence)
GetTopTransactionId();
(void) read_seq_tuple(seqrel, &buf, &seqdatatuple);
RelationSetNewRelfilenode(seqrel, newrelpersistence);
RelationSetNewRelfilenumber(seqrel, newrelpersistence);
fill_seq_with_data(seqrel, &seqdatatuple);
UnlockReleaseBuffer(buf);
@ -836,7 +836,7 @@ nextval_internal(Oid relid, bool check_permissions)
seq->is_called = true;
seq->log_cnt = 0;
xlrec.node = seqrel->rd_node;
xlrec.locator = seqrel->rd_locator;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) seqdatatuple.t_data, seqdatatuple.t_len);
@ -1023,7 +1023,7 @@ do_setval(Oid relid, int64 next, bool iscalled)
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
xlrec.node = seqrel->rd_node;
xlrec.locator = seqrel->rd_locator;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) seqdatatuple.t_data, seqdatatuple.t_len);
@ -1147,7 +1147,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
if (!found)
{
/* relid already filled in */
elm->filenode = InvalidOid;
elm->filenumber = InvalidRelFileNumber;
elm->lxid = InvalidLocalTransactionId;
elm->last_valid = false;
elm->last = elm->cached = 0;
@ -1169,9 +1169,9 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
* discard any cached-but-unissued values. We do not touch the currval()
* state, however.
*/
if (seqrel->rd_rel->relfilenode != elm->filenode)
if (seqrel->rd_rel->relfilenode != elm->filenumber)
{
elm->filenode = seqrel->rd_rel->relfilenode;
elm->filenumber = seqrel->rd_rel->relfilenode;
elm->cached = elm->last;
}
@ -1254,7 +1254,8 @@ read_seq_tuple(Relation rel, Buffer *buf, HeapTuple seqdatatuple)
* changed. This allows ALTER SEQUENCE to behave transactionally. Currently,
* the only option that doesn't cause that is OWNED BY. It's *necessary* for
* ALTER SEQUENCE OWNED BY to not rewrite the sequence, because that would
* break pg_upgrade by causing unwanted changes in the sequence's relfilenode.
* break pg_upgrade by causing unwanted changes in the sequence's
* relfilenumber.
*/
static void
init_params(ParseState *pstate, List *options, bool for_identity,

View File

@ -596,7 +596,7 @@ static void ATExecForceNoForceRowSecurity(Relation rel, bool force_rls);
static ObjectAddress ATExecSetCompression(AlteredTableInfo *tab, Relation rel,
const char *column, Node *newValue, LOCKMODE lockmode);
static void index_copy_data(Relation rel, RelFileNode newrnode);
static void index_copy_data(Relation rel, RelFileLocator newrlocator);
static const char *storage_name(char c);
static void RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid,
@ -1986,12 +1986,12 @@ ExecuteTruncateGuts(List *explicit_rels,
/*
* Normally, we need a transaction-safe truncation here. However, if
* the table was either created in the current (sub)transaction or has
* a new relfilenode in the current (sub)transaction, then we can just
* truncate it in-place, because a rollback would cause the whole
* a new relfilenumber in the current (sub)transaction, then we can
* just truncate it in-place, because a rollback would cause the whole
* table or the current physical file to be thrown away anyway.
*/
if (rel->rd_createSubid == mySubid ||
rel->rd_newRelfilenodeSubid == mySubid)
rel->rd_newRelfilelocatorSubid == mySubid)
{
/* Immediate, non-rollbackable truncation is OK */
heap_truncate_one_rel(rel);
@ -2014,10 +2014,10 @@ ExecuteTruncateGuts(List *explicit_rels,
* Need the full transaction-safe pushups.
*
* Create a new empty storage file for the relation, and assign it
* as the relfilenode value. The old storage file is scheduled for
* deletion at commit.
* as the relfilenumber value. The old storage file is scheduled
* for deletion at commit.
*/
RelationSetNewRelfilenode(rel, rel->rd_rel->relpersistence);
RelationSetNewRelfilenumber(rel, rel->rd_rel->relpersistence);
heap_relid = RelationGetRelid(rel);
@ -2030,8 +2030,8 @@ ExecuteTruncateGuts(List *explicit_rels,
Relation toastrel = relation_open(toast_relid,
AccessExclusiveLock);
RelationSetNewRelfilenode(toastrel,
toastrel->rd_rel->relpersistence);
RelationSetNewRelfilenumber(toastrel,
toastrel->rd_rel->relpersistence);
table_close(toastrel, NoLock);
}
@ -3315,11 +3315,11 @@ CheckRelationTableSpaceMove(Relation rel, Oid newTableSpaceId)
/*
* SetRelationTableSpace
* Set new reltablespace and relfilenode in pg_class entry.
* Set new reltablespace and relfilenumber in pg_class entry.
*
* newTableSpaceId is the new tablespace for the relation, and
* newRelFileNode its new filenode. If newRelFileNode is InvalidOid,
* this field is not updated.
* newRelFilenumber its new filenumber. If newRelFilenumber is
* InvalidRelFileNumber, this field is not updated.
*
* NOTE: The caller must hold AccessExclusiveLock on the relation.
*
@ -3331,7 +3331,7 @@ CheckRelationTableSpaceMove(Relation rel, Oid newTableSpaceId)
void
SetRelationTableSpace(Relation rel,
Oid newTableSpaceId,
Oid newRelFileNode)
RelFileNumber newRelFilenumber)
{
Relation pg_class;
HeapTuple tuple;
@ -3351,8 +3351,8 @@ SetRelationTableSpace(Relation rel,
/* Update the pg_class row. */
rd_rel->reltablespace = (newTableSpaceId == MyDatabaseTableSpace) ?
InvalidOid : newTableSpaceId;
if (OidIsValid(newRelFileNode))
rd_rel->relfilenode = newRelFileNode;
if (RelFileNumberIsValid(newRelFilenumber))
rd_rel->relfilenode = newRelFilenumber;
CatalogTupleUpdate(pg_class, &tuple->t_self, tuple);
/*
@ -5420,7 +5420,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode,
* persistence: on one hand, we need to ensure that the buffers
* belonging to each of the two relations are marked with or without
* BM_PERMANENT properly. On the other hand, since rewriting creates
* and assigns a new relfilenode, we automatically create or drop an
* and assigns a new relfilenumber, we automatically create or drop an
* init fork for the relation as appropriate.
*/
if (tab->rewrite > 0 && tab->relkind != RELKIND_SEQUENCE)
@ -5506,12 +5506,13 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode,
* Create transient table that will receive the modified data.
*
* Ensure it is marked correctly as logged or unlogged. We have
* to do this here so that buffers for the new relfilenode will
* to do this here so that buffers for the new relfilenumber will
* have the right persistence set, and at the same time ensure
* that the original filenode's buffers will get read in with the
* correct setting (i.e. the original one). Otherwise a rollback
* after the rewrite would possibly result with buffers for the
* original filenode having the wrong persistence setting.
* that the original filenumber's buffers will get read in with
* the correct setting (i.e. the original one). Otherwise a
* rollback after the rewrite would possibly result in buffers
* for the original filenumber having the wrong persistence
* setting.
*
* NB: This relies on swap_relation_files() also swapping the
* persistence. That wouldn't work for pg_class, but that can't be
@ -8597,7 +8598,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
/* suppress schema rights check when rebuilding existing index */
check_rights = !is_rebuild;
/* skip index build if phase 3 will do it or we're reusing an old one */
skip_build = tab->rewrite > 0 || OidIsValid(stmt->oldNode);
skip_build = tab->rewrite > 0 || RelFileNumberIsValid(stmt->oldNumber);
/* suppress notices when rebuilding existing index */
quiet = is_rebuild;
@ -8613,21 +8614,21 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
quiet);
/*
* If TryReuseIndex() stashed a relfilenode for us, we used it for the new
* index instead of building from scratch. Restore associated fields.
* If TryReuseIndex() stashed a relfilenumber for us, we used it for the
* new index instead of building from scratch. Restore associated fields.
* This may store InvalidSubTransactionId in both fields, in which case
* relcache.c will assume it can rebuild the relcache entry. Hence, do
* this after the CCI that made catalog rows visible to any rebuild. The
* DROP of the old edition of this index will have scheduled the storage
* for deletion at commit, so cancel that pending deletion.
*/
if (OidIsValid(stmt->oldNode))
if (RelFileNumberIsValid(stmt->oldNumber))
{
Relation irel = index_open(address.objectId, NoLock);
irel->rd_createSubid = stmt->oldCreateSubid;
irel->rd_firstRelfilenodeSubid = stmt->oldFirstRelfilenodeSubid;
RelationPreserveStorage(irel->rd_node, true);
irel->rd_firstRelfilelocatorSubid = stmt->oldFirstRelfilelocatorSubid;
RelationPreserveStorage(irel->rd_locator, true);
index_close(irel, NoLock);
}
@ -13491,9 +13492,9 @@ TryReuseIndex(Oid oldId, IndexStmt *stmt)
/* If it's a partitioned index, there is no storage to share. */
if (irel->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)
{
stmt->oldNode = irel->rd_node.relNode;
stmt->oldNumber = irel->rd_locator.relNumber;
stmt->oldCreateSubid = irel->rd_createSubid;
stmt->oldFirstRelfilenodeSubid = irel->rd_firstRelfilenodeSubid;
stmt->oldFirstRelfilelocatorSubid = irel->rd_firstRelfilelocatorSubid;
}
index_close(irel, NoLock);
}
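Read together, the two ends of this hand-off form a stash-and-restore pair; condensed from the hunks above:

/* TryReuseIndex: remember the old storage identity */
stmt->oldNumber = irel->rd_locator.relNumber;
stmt->oldCreateSubid = irel->rd_createSubid;
stmt->oldFirstRelfilelocatorSubid = irel->rd_firstRelfilelocatorSubid;

/* ATExecAddIndex: adopt it and cancel the pending unlink */
irel->rd_createSubid = stmt->oldCreateSubid;
irel->rd_firstRelfilelocatorSubid = stmt->oldFirstRelfilelocatorSubid;
RelationPreserveStorage(irel->rd_locator, true);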
@ -14340,8 +14341,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode)
{
Relation rel;
Oid reltoastrelid;
Oid newrelfilenode;
RelFileNode newrnode;
RelFileNumber newrelfilenumber;
RelFileLocator newrlocator;
List *reltoastidxids = NIL;
ListCell *lc;
@ -14370,26 +14371,26 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode)
}
/*
* Relfilenodes are not unique in databases across tablespaces, so we need
* to allocate a new one in the new tablespace.
* Relfilenumbers are not unique in databases across tablespaces, so we
* need to allocate a new one in the new tablespace.
*/
newrelfilenode = GetNewRelFileNode(newTableSpace, NULL,
rel->rd_rel->relpersistence);
newrelfilenumber = GetNewRelFileNumber(newTableSpace, NULL,
rel->rd_rel->relpersistence);
/* Open old and new relation */
newrnode = rel->rd_node;
newrnode.relNode = newrelfilenode;
newrnode.spcNode = newTableSpace;
newrlocator = rel->rd_locator;
newrlocator.relNumber = newrelfilenumber;
newrlocator.spcOid = newTableSpace;
/* hand off to AM to actually create the new filenode and copy the data */
/* hand off to AM to actually create new rel storage and copy the data */
if (rel->rd_rel->relkind == RELKIND_INDEX)
{
index_copy_data(rel, newrnode);
index_copy_data(rel, newrlocator);
}
else
{
Assert(RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind));
table_relation_copy_data(rel, &newrnode);
table_relation_copy_data(rel, &newrlocator);
}
/*
@ -14400,11 +14401,11 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode)
* the updated pg_class entry), but that's forbidden with
* CheckRelationTableSpaceMove().
*/
SetRelationTableSpace(rel, newTableSpace, newrelfilenode);
SetRelationTableSpace(rel, newTableSpace, newrelfilenumber);
InvokeObjectPostAlterHook(RelationRelationId, RelationGetRelid(rel), 0);
RelationAssumeNewRelfilenode(rel);
RelationAssumeNewRelfilelocator(rel);
relation_close(rel, NoLock);
@ -14630,11 +14631,11 @@ AlterTableMoveAll(AlterTableMoveAllStmt *stmt)
}
static void
index_copy_data(Relation rel, RelFileNode newrnode)
index_copy_data(Relation rel, RelFileLocator newrlocator)
{
SMgrRelation dstrel;
dstrel = smgropen(newrnode, rel->rd_backend);
dstrel = smgropen(newrlocator, rel->rd_backend);
/*
* Since we copy the file directly without looking at the shared buffers,
@ -14648,10 +14649,10 @@ index_copy_data(Relation rel, RelFileNode newrnode)
* Create and copy all forks of the relation, and schedule unlinking of
* old physical files.
*
* NOTE: any conflict in relfilenode value will be caught in
* NOTE: any conflict in relfilenumber value will be caught in
* RelationCreateStorage().
*/
RelationCreateStorage(newrnode, rel->rd_rel->relpersistence, true);
RelationCreateStorage(newrlocator, rel->rd_rel->relpersistence, true);
/* copy main fork */
RelationCopyStorage(RelationGetSmgr(rel), dstrel, MAIN_FORKNUM,
@ -14672,7 +14673,7 @@ index_copy_data(Relation rel, RelFileNode newrnode)
if (RelationIsPermanent(rel) ||
(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
forkNum == INIT_FORKNUM))
log_smgrcreate(&newrnode, forkNum);
log_smgrcreate(&newrlocator, forkNum);
RelationCopyStorage(RelationGetSmgr(rel), dstrel, forkNum,
rel->rd_rel->relpersistence);
}

View File

@ -12,12 +12,12 @@
* remove the possibility of having file name conflicts, we isolate
* files within a tablespace into database-specific subdirectories.
*
* To support file access via the information given in RelFileNode, we
* To support file access via the information given in RelFileLocator, we
* maintain a symbolic-link map in $PGDATA/pg_tblspc. The symlinks are
* named by tablespace OIDs and point to the actual tablespace directories.
* There is also a per-cluster version directory in each tablespace.
* Thus the full path to an arbitrary file is
* $PGDATA/pg_tblspc/spcoid/PG_MAJORVER_CATVER/dboid/relfilenode
* $PGDATA/pg_tblspc/spcoid/PG_MAJORVER_CATVER/dboid/relfilenumber
* e.g.
* $PGDATA/pg_tblspc/20981/PG_9.0_201002161/719849/83292814
*
@ -25,8 +25,8 @@
* tables) and pg_default (for everything else). For backwards compatibility
* and to remain functional on platforms without symlinks, these tablespaces
* are accessed specially: they are respectively
* $PGDATA/global/relfilenode
* $PGDATA/base/dboid/relfilenode
* $PGDATA/global/relfilenumber
* $PGDATA/base/dboid/relfilenumber
*
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
@ -115,7 +115,7 @@ static bool destroy_tablespace_directories(Oid tablespaceoid, bool redo);
* re-create a database subdirectory (of $PGDATA/base) during WAL replay.
*/
void
TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
TablespaceCreateDbspace(Oid spcOid, Oid dbOid, bool isRedo)
{
struct stat st;
char *dir;
@ -124,13 +124,13 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
* The global tablespace doesn't have per-database subdirectories, so
* nothing to do for it.
*/
if (spcNode == GLOBALTABLESPACE_OID)
if (spcOid == GLOBALTABLESPACE_OID)
return;
Assert(OidIsValid(spcNode));
Assert(OidIsValid(dbNode));
Assert(OidIsValid(spcOid));
Assert(OidIsValid(dbOid));
dir = GetDatabasePath(dbNode, spcNode);
dir = GetDatabasePath(dbOid, spcOid);
if (stat(dir, &st) < 0)
{
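Putting the path rules from the header comment into code form, a sketch of what relpathbackend() yields for a permanent relation's main fork (fork and temp-backend suffixes omitted):

char *path;

if (rlocator.spcOid == GLOBALTABLESPACE_OID)
    path = psprintf("global/%u", rlocator.relNumber);
else if (rlocator.spcOid == DEFAULTTABLESPACE_OID)
    path = psprintf("base/%u/%u", rlocator.dbOid, rlocator.relNumber);
else
    path = psprintf("pg_tblspc/%u/%s/%u/%u",
                    rlocator.spcOid, TABLESPACE_VERSION_DIRECTORY,
                    rlocator.dbOid, rlocator.relNumber);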

View File

@ -4194,9 +4194,9 @@ _copyIndexStmt(const IndexStmt *from)
COPY_NODE_FIELD(excludeOpNames);
COPY_STRING_FIELD(idxcomment);
COPY_SCALAR_FIELD(indexOid);
COPY_SCALAR_FIELD(oldNode);
COPY_SCALAR_FIELD(oldNumber);
COPY_SCALAR_FIELD(oldCreateSubid);
COPY_SCALAR_FIELD(oldFirstRelfilenodeSubid);
COPY_SCALAR_FIELD(oldFirstRelfilelocatorSubid);
COPY_SCALAR_FIELD(unique);
COPY_SCALAR_FIELD(nulls_not_distinct);
COPY_SCALAR_FIELD(primary);

View File

@ -1768,9 +1768,9 @@ _equalIndexStmt(const IndexStmt *a, const IndexStmt *b)
COMPARE_NODE_FIELD(excludeOpNames);
COMPARE_STRING_FIELD(idxcomment);
COMPARE_SCALAR_FIELD(indexOid);
COMPARE_SCALAR_FIELD(oldNode);
COMPARE_SCALAR_FIELD(oldNumber);
COMPARE_SCALAR_FIELD(oldCreateSubid);
COMPARE_SCALAR_FIELD(oldFirstRelfilenodeSubid);
COMPARE_SCALAR_FIELD(oldFirstRelfilelocatorSubid);
COMPARE_SCALAR_FIELD(unique);
COMPARE_SCALAR_FIELD(nulls_not_distinct);
COMPARE_SCALAR_FIELD(primary);

View File

@ -2932,9 +2932,9 @@ _outIndexStmt(StringInfo str, const IndexStmt *node)
WRITE_NODE_FIELD(excludeOpNames);
WRITE_STRING_FIELD(idxcomment);
WRITE_OID_FIELD(indexOid);
WRITE_OID_FIELD(oldNode);
WRITE_OID_FIELD(oldNumber);
WRITE_UINT_FIELD(oldCreateSubid);
WRITE_UINT_FIELD(oldFirstRelfilenodeSubid);
WRITE_UINT_FIELD(oldFirstRelfilelocatorSubid);
WRITE_BOOL_FIELD(unique);
WRITE_BOOL_FIELD(nulls_not_distinct);
WRITE_BOOL_FIELD(primary);

View File

@ -7990,9 +7990,9 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name
n->excludeOpNames = NIL;
n->idxcomment = NULL;
n->indexOid = InvalidOid;
n->oldNode = InvalidOid;
n->oldNumber = InvalidRelFileNumber;
n->oldCreateSubid = InvalidSubTransactionId;
n->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
n->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
n->primary = false;
n->isconstraint = false;
n->deferrable = false;
@ -8022,9 +8022,9 @@ IndexStmt: CREATE opt_unique INDEX opt_concurrently opt_index_name
n->excludeOpNames = NIL;
n->idxcomment = NULL;
n->indexOid = InvalidOid;
n->oldNode = InvalidOid;
n->oldNumber = InvalidRelFileNumber;
n->oldCreateSubid = InvalidSubTransactionId;
n->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
n->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
n->primary = false;
n->isconstraint = false;
n->deferrable = false;

View File

@ -1578,9 +1578,9 @@ generateClonedIndexStmt(RangeVar *heapRel, Relation source_idx,
index->excludeOpNames = NIL;
index->idxcomment = NULL;
index->indexOid = InvalidOid;
index->oldNode = InvalidOid;
index->oldNumber = InvalidRelFileNumber;
index->oldCreateSubid = InvalidSubTransactionId;
index->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
index->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
index->unique = idxrec->indisunique;
index->nulls_not_distinct = idxrec->indnullsnotdistinct;
index->primary = idxrec->indisprimary;
@ -2199,9 +2199,9 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
index->excludeOpNames = NIL;
index->idxcomment = NULL;
index->indexOid = InvalidOid;
index->oldNode = InvalidOid;
index->oldNumber = InvalidRelFileNumber;
index->oldCreateSubid = InvalidSubTransactionId;
index->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
index->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
index->transformed = false;
index->concurrent = false;
index->if_not_exists = false;
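These parser and node-support sites all initialize the same renamed IndexStmt members, declared roughly as follows (parsenodes.h, abridged):

Oid              indexOid;        /* OID of an existing index, if any */
RelFileNumber    oldNumber;       /* relfilenumber of existing storage,
                                   * if any */
SubTransactionId oldCreateSubid;  /* rd_createSubid of oldNumber */
SubTransactionId oldFirstRelfilelocatorSubid;  /* rd_firstRelfilelocatorSubid
                                                * of oldNumber */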

View File

@ -1207,7 +1207,7 @@ CompactCheckpointerRequestQueue(void)
* We use the request struct directly as a hashtable key. This
* assumes that any padding bytes in the structs are consistently the
* same, which should be okay because we zeroed them in
* CheckpointerShmemInit. Note also that RelFileNode had better
* CheckpointerShmemInit. Note also that RelFileLocator had better
* contain no pad bytes.
*/
request = &CheckpointerShmem->requests[n];

View File

@ -845,7 +845,7 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
XLogReaderState *r = buf->record;
xl_heap_insert *xlrec;
ReorderBufferChange *change;
RelFileNode target_node;
RelFileLocator target_locator;
xlrec = (xl_heap_insert *) XLogRecGetData(r);
@ -857,8 +857,8 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
return;
/* only interested in our database */
XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
if (target_node.dbNode != ctx->slot->data.database)
XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
@ -872,7 +872,7 @@ DecodeInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT;
change->origin_id = XLogRecGetOrigin(r);
memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
tupledata = XLogRecGetBlockData(r, 0, &datalen);
tuplelen = datalen - SizeOfHeapHeader;
@ -902,13 +902,13 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
xl_heap_update *xlrec;
ReorderBufferChange *change;
char *data;
RelFileNode target_node;
RelFileLocator target_locator;
xlrec = (xl_heap_update *) XLogRecGetData(r);
/* only interested in our database */
XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
if (target_node.dbNode != ctx->slot->data.database)
XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
@ -918,7 +918,7 @@ DecodeUpdate(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
change = ReorderBufferGetChange(ctx->reorder);
change->action = REORDER_BUFFER_CHANGE_UPDATE;
change->origin_id = XLogRecGetOrigin(r);
memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE)
{
@ -968,13 +968,13 @@ DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
XLogReaderState *r = buf->record;
xl_heap_delete *xlrec;
ReorderBufferChange *change;
RelFileNode target_node;
RelFileLocator target_locator;
xlrec = (xl_heap_delete *) XLogRecGetData(r);
/* only interested in our database */
XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
if (target_node.dbNode != ctx->slot->data.database)
XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
@ -990,7 +990,7 @@ DecodeDelete(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
change->origin_id = XLogRecGetOrigin(r);
memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
/* old primary key stored */
if (xlrec->flags & XLH_DELETE_CONTAINS_OLD)
@ -1063,7 +1063,7 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
char *data;
char *tupledata;
Size tuplelen;
RelFileNode rnode;
RelFileLocator rlocator;
xlrec = (xl_heap_multi_insert *) XLogRecGetData(r);
@ -1075,8 +1075,8 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
return;
/* only interested in our database */
XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL);
if (rnode.dbNode != ctx->slot->data.database)
XLogRecGetBlockTag(r, 0, &rlocator, NULL, NULL);
if (rlocator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
@ -1103,7 +1103,7 @@ DecodeMultiInsert(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
change->action = REORDER_BUFFER_CHANGE_INSERT;
change->origin_id = XLogRecGetOrigin(r);
memcpy(&change->data.tp.relnode, &rnode, sizeof(RelFileNode));
memcpy(&change->data.tp.rlocator, &rlocator, sizeof(RelFileLocator));
xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(data);
data = ((char *) xlhdr) + SizeOfMultiInsertTuple;
@ -1165,11 +1165,11 @@ DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
{
XLogReaderState *r = buf->record;
ReorderBufferChange *change;
RelFileNode target_node;
RelFileLocator target_locator;
/* only interested in our database */
XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
if (target_node.dbNode != ctx->slot->data.database)
XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
@ -1180,7 +1180,7 @@ DecodeSpecConfirm(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM;
change->origin_id = XLogRecGetOrigin(r);
memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
change->data.tp.clear_toast_afterwards = true;
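Every Decode* routine above opens with the same renamed filter; isolated, the pattern is:

RelFileLocator locator;

XLogRecGetBlockTag(r, 0, &locator, NULL, NULL);
if (locator.dbOid != ctx->slot->data.database)
    return;    /* change belongs to some other database */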

View File

@ -106,7 +106,7 @@
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenodemap.h"
#include "utils/relfilenumbermap.h"
/* entry for a hash table we use to map from xid to our transaction state */
@ -116,10 +116,10 @@ typedef struct ReorderBufferTXNByIdEnt
ReorderBufferTXN *txn;
} ReorderBufferTXNByIdEnt;
/* data structures for (relfilenode, ctid) => (cmin, cmax) mapping */
/* data structures for (relfilelocator, ctid) => (cmin, cmax) mapping */
typedef struct ReorderBufferTupleCidKey
{
RelFileNode relnode;
RelFileLocator rlocator;
ItemPointerData tid;
} ReorderBufferTupleCidKey;
@ -1643,7 +1643,7 @@ ReorderBufferTruncateTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, bool txn_prep
}
/*
* Destroy the (relfilenode, ctid) hashtable, so that we don't leak any
* Destroy the (relfilelocator, ctid) hashtable, so that we don't leak any
* memory. We could also keep the hash table and update it with new ctid
* values, but this seems simpler and good enough for now.
*/
@ -1673,7 +1673,7 @@ ReorderBufferTruncateTXN(ReorderBuffer *rb, ReorderBufferTXN *txn, bool txn_prep
}
/*
* Build a hash with a (relfilenode, ctid) -> (cmin, cmax) mapping for use by
* Build a hash with a (relfilelocator, ctid) -> (cmin, cmax) mapping for use by
* HeapTupleSatisfiesHistoricMVCC.
*/
static void
@ -1711,7 +1711,7 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
/* be careful about padding */
memset(&key, 0, sizeof(ReorderBufferTupleCidKey));
key.relnode = change->data.tuplecid.node;
key.rlocator = change->data.tuplecid.locator;
ItemPointerCopy(&change->data.tuplecid.tid,
&key.tid);
@ -2140,36 +2140,36 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
case REORDER_BUFFER_CHANGE_DELETE:
Assert(snapshot_now);
reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode,
change->data.tp.relnode.relNode);
reloid = RelidByRelfilenumber(change->data.tp.rlocator.spcOid,
change->data.tp.rlocator.relNumber);
/*
* Mapped catalog tuple without data, emitted while
* catalog table was in the process of being rewritten. We
* can fail to look up the relfilenode, because the
* can fail to look up the relfilenumber, because the
* relmapper has no "historic" view, in contrast to the
* normal catalog during decoding. Thus repeated rewrites
* can cause a lookup failure. That's OK because we do not
* decode catalog changes anyway. Normally such tuples
* would be skipped over below, but we can't identify
* whether the table should be logically logged without
* mapping the relfilenode to the oid.
* mapping the relfilenumber to the oid.
*/
if (reloid == InvalidOid &&
change->data.tp.newtuple == NULL &&
change->data.tp.oldtuple == NULL)
goto change_done;
else if (reloid == InvalidOid)
elog(ERROR, "could not map filenode \"%s\" to relation OID",
relpathperm(change->data.tp.relnode,
elog(ERROR, "could not map filenumber \"%s\" to relation OID",
relpathperm(change->data.tp.rlocator,
MAIN_FORKNUM));
relation = RelationIdGetRelation(reloid);
if (!RelationIsValid(relation))
elog(ERROR, "could not open relation with OID %u (for filenode \"%s\")",
elog(ERROR, "could not open relation with OID %u (for filenumber \"%s\")",
reloid,
relpathperm(change->data.tp.relnode,
relpathperm(change->data.tp.rlocator,
MAIN_FORKNUM));
if (!RelationIsLogicallyLogged(relation))
@ -3157,7 +3157,7 @@ ReorderBufferChangeMemoryUpdate(ReorderBuffer *rb,
}
/*
* Add new (relfilenode, tid) -> (cmin, cmax) mappings.
* Add new (relfilelocator, tid) -> (cmin, cmax) mappings.
*
* We do not include this change type in memory accounting, because we
* keep CIDs in a separate list and do not evict them when reaching
@ -3165,7 +3165,7 @@ ReorderBufferChangeMemoryUpdate(ReorderBuffer *rb,
*/
void
ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
XLogRecPtr lsn, RelFileNode node,
XLogRecPtr lsn, RelFileLocator locator,
ItemPointerData tid, CommandId cmin,
CommandId cmax, CommandId combocid)
{
@ -3174,7 +3174,7 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
change->data.tuplecid.node = node;
change->data.tuplecid.locator = locator;
change->data.tuplecid.tid = tid;
change->data.tuplecid.cmin = cmin;
change->data.tuplecid.cmax = cmax;
@ -4839,7 +4839,7 @@ ReorderBufferToastReset(ReorderBuffer *rb, ReorderBufferTXN *txn)
* need anymore.
*
* To resolve those problems we have a per-transaction hash of (cmin,
* cmax) tuples keyed by (relfilenode, ctid) which contains the actual
* cmax) tuples keyed by (relfilelocator, ctid) which contains the actual
* (cmin, cmax) values. That also takes care of combo CIDs by simply
* not caring about them at all. As we have the real cmin/cmax values
* combo CIDs aren't interesting.
@ -4870,9 +4870,9 @@ DisplayMapping(HTAB *tuplecid_data)
while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL)
{
elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u",
ent->key.relnode.dbNode,
ent->key.relnode.spcNode,
ent->key.relnode.relNode,
ent->key.rlocator.dbOid,
ent->key.rlocator.spcOid,
ent->key.rlocator.relNumber,
ItemPointerGetBlockNumber(&ent->key.tid),
ItemPointerGetOffsetNumber(&ent->key.tid),
ent->cmin,
@ -4932,7 +4932,7 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname)
path, readBytes,
(int32) sizeof(LogicalRewriteMappingData))));
key.relnode = map.old_node;
key.rlocator = map.old_locator;
ItemPointerCopy(&map.old_tid,
&key.tid);
@ -4947,7 +4947,7 @@ ApplyLogicalMappingFile(HTAB *tuplecid_data, Oid relid, const char *fname)
if (!ent)
continue;
key.relnode = map.new_node;
key.rlocator = map.new_locator;
ItemPointerCopy(&map.new_tid,
&key.tid);
@ -5120,10 +5120,10 @@ ResolveCminCmaxDuringDecoding(HTAB *tuplecid_data,
Assert(!BufferIsLocal(buffer));
/*
* get relfilenode from the buffer, no convenient way to access it other
* than that.
* get relfilelocator from the buffer, no convenient way to access it
* other than that.
*/
BufferGetTag(buffer, &key.relnode, &forkno, &blockno);
BufferGetTag(buffer, &key.rlocator, &forkno, &blockno);
/* tuples can only be in the main fork */
Assert(forkno == MAIN_FORKNUM);
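A lookup in the (relfilelocator, ctid) hash always follows the same shape, whichever of the sites above builds the key; the memset matters because the struct key is compared bytewise:

ReorderBufferTupleCidKey key;
ReorderBufferTupleCidEnt *ent;

memset(&key, 0, sizeof(key));
key.rlocator = change->data.tuplecid.locator;
ItemPointerCopy(&change->data.tuplecid.tid, &key.tid);
ent = (ReorderBufferTupleCidEnt *)
    hash_search(tuplecid_data, &key, HASH_FIND, NULL);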

View File

@ -781,7 +781,7 @@ SnapBuildProcessNewCid(SnapBuild *builder, TransactionId xid,
ReorderBufferXidSetCatalogChanges(builder->reorder, xid, lsn);
ReorderBufferAddNewTupleCids(builder->reorder, xlrec->top_xid, lsn,
xlrec->target_node, xlrec->target_tid,
xlrec->target_locator, xlrec->target_tid,
xlrec->cmin, xlrec->cmax,
xlrec->combocid);

View File

@ -121,12 +121,12 @@ typedef struct CkptTsStatus
* Type for array used to sort SMgrRelations
*
* FlushRelationsAllBuffers shares the same comparator function with
* DropRelFileNodesAllBuffers. Pointer to this struct and RelFileNode must be
* DropRelFileLocatorsAllBuffers. Pointer to this struct and RelFileLocator must be
* compatible.
*/
typedef struct SMgrSortArray
{
RelFileNode rnode; /* This must be the first member */
RelFileLocator rlocator; /* This must be the first member */
SMgrRelation srel;
} SMgrSortArray;
@ -483,16 +483,16 @@ static BufferDesc *BufferAlloc(SMgrRelation smgr,
BufferAccessStrategy strategy,
bool *foundPtr);
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
static void FindAndDropRelFileNodeBuffers(RelFileNode rnode,
ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock);
static void FindAndDropRelFileLocatorBuffers(RelFileLocator rlocator,
ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock);
static void RelationCopyStorageUsingBuffer(Relation src, Relation dst,
ForkNumber forkNum,
bool isunlogged);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
static int rnode_comparator(const void *p1, const void *p2);
static int rlocator_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *a, const BufferTag *b);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
@ -515,7 +515,7 @@ PrefetchSharedBuffer(SMgrRelation smgr_reln,
Assert(BlockNumberIsValid(blockNum));
/* create a tag so we can lookup the buffer */
INIT_BUFFERTAG(newTag, smgr_reln->smgr_rnode.node,
INIT_BUFFERTAG(newTag, smgr_reln->smgr_rlocator.locator,
forkNum, blockNum);
/* determine its hash code and partition lock ID */
@ -620,7 +620,7 @@ PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
* tag. In that case, the buffer is pinned and the usage count is bumped.
*/
bool
ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
Buffer recent_buffer)
{
BufferDesc *bufHdr;
@ -632,7 +632,7 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
ReservePrivateRefCountEntry();
INIT_BUFFERTAG(tag, rnode, forkNum, blockNum);
INIT_BUFFERTAG(tag, rlocator, forkNum, blockNum);
if (BufferIsLocal(recent_buffer))
{
@ -786,13 +786,13 @@ ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
* BackendId).
*/
Buffer
ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum,
ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber blockNum, ReadBufferMode mode,
BufferAccessStrategy strategy, bool permanent)
{
bool hit;
SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
@ -824,10 +824,10 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
isExtend = (blockNum == P_NEW);
TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode,
smgr->smgr_rnode.backend,
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber,
smgr->smgr_rlocator.backend,
isExtend);
/* Substitute proper block number if caller asked for P_NEW */
@ -839,7 +839,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("cannot extend relation %s beyond %u blocks",
relpath(smgr->smgr_rnode, forkNum),
relpath(smgr->smgr_rlocator, forkNum),
P_NEW)));
}
@ -886,10 +886,10 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
VacuumCostBalance += VacuumCostPageHit;
TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode,
smgr->smgr_rnode.backend,
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber,
smgr->smgr_rlocator.backend,
isExtend,
found);
@ -926,7 +926,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
if (!PageIsNew((Page) bufBlock))
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
blockNum, relpath(smgr->smgr_rnode, forkNum)),
blockNum, relpath(smgr->smgr_rlocator, forkNum)),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
@ -1028,7 +1028,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page in block %u of relation %s; zeroing out page",
blockNum,
relpath(smgr->smgr_rnode, forkNum))));
relpath(smgr->smgr_rlocator, forkNum))));
MemSet((char *) bufBlock, 0, BLCKSZ);
}
else
@ -1036,7 +1036,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page in block %u of relation %s",
blockNum,
relpath(smgr->smgr_rnode, forkNum))));
relpath(smgr->smgr_rlocator, forkNum))));
}
}
}
@ -1076,10 +1076,10 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
VacuumCostBalance += VacuumCostPageMiss;
TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode,
smgr->smgr_rnode.backend,
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber,
smgr->smgr_rlocator.backend,
isExtend,
found);
@ -1124,7 +1124,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
uint32 buf_state;
/* create a tag so we can lookup the buffer */
INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* determine its hash code and partition lock ID */
newHash = BufTableHashCode(&newTag);
@ -1255,9 +1255,9 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
/* OK, do the I/O */
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber);
FlushBuffer(buf, NULL);
LWLockRelease(BufferDescriptorGetContentLock(buf));
@ -1266,9 +1266,9 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
&buf->tag);
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
smgr->smgr_rnode.node.spcNode,
smgr->smgr_rnode.node.dbNode,
smgr->smgr_rnode.node.relNode);
smgr->smgr_rlocator.locator.spcOid,
smgr->smgr_rlocator.locator.dbOid,
smgr->smgr_rlocator.locator.relNumber);
}
else
{
@ -1647,7 +1647,7 @@ ReleaseAndReadBuffer(Buffer buffer,
{
bufHdr = GetLocalBufferDescriptor(-buffer - 1);
if (bufHdr->tag.blockNum == blockNum &&
RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
bufHdr->tag.forkNum == forkNum)
return buffer;
ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
@ -1658,7 +1658,7 @@ ReleaseAndReadBuffer(Buffer buffer,
bufHdr = GetBufferDescriptor(buffer - 1);
/* we have pin, so it's ok to examine tag without spinlock */
if (bufHdr->tag.blockNum == blockNum &&
RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
bufHdr->tag.forkNum == forkNum)
return buffer;
UnpinBuffer(bufHdr, true);
@ -2000,8 +2000,8 @@ BufferSync(int flags)
item = &CkptBufferIds[num_to_scan++];
item->buf_id = buf_id;
item->tsId = bufHdr->tag.rnode.spcNode;
item->relNode = bufHdr->tag.rnode.relNode;
item->tsId = bufHdr->tag.rlocator.spcOid;
item->relNumber = bufHdr->tag.rlocator.relNumber;
item->forkNum = bufHdr->tag.forkNum;
item->blockNum = bufHdr->tag.blockNum;
}
@ -2708,7 +2708,7 @@ PrintBufferLeakWarning(Buffer buffer)
}
/* theoretically we should lock the bufhdr here */
path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
path = relpathbackend(buf->tag.rlocator, backend, buf->tag.forkNum);
buf_state = pg_atomic_read_u32(&buf->state);
elog(WARNING,
"buffer refcount leak: [%03d] "
@ -2769,11 +2769,11 @@ BufferGetBlockNumber(Buffer buffer)
/*
* BufferGetTag
* Returns the relfilenode, fork number and block number associated with
* Returns the relfilelocator, fork number and block number associated with
* a buffer.
*/
void
BufferGetTag(Buffer buffer, RelFileNode *rnode, ForkNumber *forknum,
BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum)
{
BufferDesc *bufHdr;
@ -2787,7 +2787,7 @@ BufferGetTag(Buffer buffer, RelFileNode *rnode, ForkNumber *forknum,
bufHdr = GetBufferDescriptor(buffer - 1);
/* pinned, so OK to read tag without spinlock */
*rnode = bufHdr->tag.rnode;
*rlocator = bufHdr->tag.rlocator;
*forknum = bufHdr->tag.forkNum;
*blknum = bufHdr->tag.blockNum;
}
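A minimal caller sketch, with hypothetical variable names (buf is assumed to be a pinned Buffer), showing the renamed out-parameter in use:
	RelFileLocator rlocator;
	ForkNumber	forknum;
	BlockNumber blknum;
	BufferGetTag(buf, &rlocator, &forknum, &blknum);
	elog(DEBUG1, "buffer holds block %u of relation %u/%u/%u",
		 blknum, rlocator.spcOid, rlocator.dbOid, rlocator.relNumber);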
@ -2838,13 +2838,13 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
/* Find smgr relation for buffer */
if (reln == NULL)
reln = smgropen(buf->tag.rnode, InvalidBackendId);
reln = smgropen(buf->tag.rlocator, InvalidBackendId);
TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
buf->tag.blockNum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber);
buf_state = LockBufHdr(buf);
@ -2922,9 +2922,9 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
buf->tag.blockNum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber);
/* Pop the error context stack */
error_context_stack = errcallback.previous;
@ -3026,7 +3026,7 @@ BufferGetLSNAtomic(Buffer buffer)
}
/* ---------------------------------------------------------------------
* DropRelFileNodeBuffers
* DropRelFileLocatorBuffers
*
* This function removes from the buffer pool all the pages of the
* specified relation forks that have block numbers >= firstDelBlock.
@ -3047,25 +3047,25 @@ BufferGetLSNAtomic(Buffer buffer)
* --------------------------------------------------------------------
*/
void
DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
int nforks, BlockNumber *firstDelBlock)
DropRelFileLocatorBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
int nforks, BlockNumber *firstDelBlock)
{
int i;
int j;
RelFileNodeBackend rnode;
RelFileLocatorBackend rlocator;
BlockNumber nForkBlock[MAX_FORKNUM];
uint64 nBlocksToInvalidate = 0;
rnode = smgr_reln->smgr_rnode;
rlocator = smgr_reln->smgr_rlocator;
/* If it's a local relation, it's localbuf.c's problem. */
if (RelFileNodeBackendIsTemp(rnode))
if (RelFileLocatorBackendIsTemp(rlocator))
{
if (rnode.backend == MyBackendId)
if (rlocator.backend == MyBackendId)
{
for (j = 0; j < nforks; j++)
DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
firstDelBlock[j]);
DropRelFileLocatorLocalBuffers(rlocator.locator, forkNum[j],
firstDelBlock[j]);
}
return;
}
@ -3115,8 +3115,8 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
{
for (j = 0; j < nforks; j++)
FindAndDropRelFileNodeBuffers(rnode.node, forkNum[j],
nForkBlock[j], firstDelBlock[j]);
FindAndDropRelFileLocatorBuffers(rlocator.locator, forkNum[j],
nForkBlock[j], firstDelBlock[j]);
return;
}
@ -3138,17 +3138,17 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
* false positives are safe because we'll recheck after getting the
* buffer lock.
*
* We could check forkNum and blockNum as well as the rnode, but the
* incremental win from doing so seems small.
* We could check forkNum and blockNum as well as the rlocator, but
* the incremental win from doing so seems small.
*/
if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator))
continue;
buf_state = LockBufHdr(bufHdr);
for (j = 0; j < nforks; j++)
{
if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator) &&
bufHdr->tag.forkNum == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
@ -3162,16 +3162,16 @@ DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
}
/* ---------------------------------------------------------------------
* DropRelFileNodesAllBuffers
* DropRelFileLocatorsAllBuffers
*
* This function removes from the buffer pool all the pages of all
* forks of the specified relations. It's equivalent to calling
* DropRelFileNodeBuffers once per fork per relation with
* DropRelFileLocatorBuffers once per fork per relation with
* firstDelBlock = 0.
* --------------------------------------------------------------------
*/
void
DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
DropRelFileLocatorsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
{
int i;
int j;
@ -3179,22 +3179,22 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
SMgrRelation *rels;
BlockNumber (*block)[MAX_FORKNUM + 1];
uint64 nBlocksToInvalidate = 0;
RelFileNode *nodes;
RelFileLocator *locators;
bool cached = true;
bool use_bsearch;
if (nnodes == 0)
if (nlocators == 0)
return;
rels = palloc(sizeof(SMgrRelation) * nnodes); /* non-local relations */
rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
/* If it's a local relation, it's localbuf.c's problem. */
for (i = 0; i < nnodes; i++)
for (i = 0; i < nlocators; i++)
{
if (RelFileNodeBackendIsTemp(smgr_reln[i]->smgr_rnode))
if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
{
if (smgr_reln[i]->smgr_rnode.backend == MyBackendId)
DropRelFileNodeAllLocalBuffers(smgr_reln[i]->smgr_rnode.node);
if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId)
DropRelFileLocatorAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
}
else
rels[n++] = smgr_reln[i];
@ -3219,7 +3219,7 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
/*
* We can avoid scanning the entire buffer pool if we know the exact size
* of each of the given relation forks. See DropRelFileNodeBuffers.
* of each of the given relation forks. See DropRelFileLocatorBuffers.
*/
for (i = 0; i < n && cached; i++)
{
@ -3257,8 +3257,8 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
continue;
/* drop all the buffers for a particular relation fork */
FindAndDropRelFileNodeBuffers(rels[i]->smgr_rnode.node,
j, block[i][j], 0);
FindAndDropRelFileLocatorBuffers(rels[i]->smgr_rlocator.locator,
j, block[i][j], 0);
}
}
@ -3268,9 +3268,9 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
}
pfree(block);
nodes = palloc(sizeof(RelFileNode) * n); /* non-local relations */
locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
for (i = 0; i < n; i++)
nodes[i] = rels[i]->smgr_rnode.node;
locators[i] = rels[i]->smgr_rlocator.locator;
/*
* For low number of relations to drop just use a simple walk through, to
@ -3280,19 +3280,19 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
*/
use_bsearch = n > RELS_BSEARCH_THRESHOLD;
/* sort the list of rnodes if necessary */
/* sort the list of rlocators if necessary */
if (use_bsearch)
pg_qsort(nodes, n, sizeof(RelFileNode), rnode_comparator);
pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
for (i = 0; i < NBuffers; i++)
{
RelFileNode *rnode = NULL;
RelFileLocator *rlocator = NULL;
BufferDesc *bufHdr = GetBufferDescriptor(i);
uint32 buf_state;
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (!use_bsearch)
@ -3301,37 +3301,37 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
for (j = 0; j < n; j++)
{
if (RelFileNodeEquals(bufHdr->tag.rnode, nodes[j]))
if (RelFileLocatorEquals(bufHdr->tag.rlocator, locators[j]))
{
rnode = &nodes[j];
rlocator = &locators[j];
break;
}
}
}
else
{
rnode = bsearch((const void *) &(bufHdr->tag.rnode),
nodes, n, sizeof(RelFileNode),
rnode_comparator);
rlocator = bsearch((const void *) &(bufHdr->tag.rlocator),
locators, n, sizeof(RelFileLocator),
rlocator_comparator);
}
/* buffer doesn't belong to any of the given relfilenodes; skip it */
if (rnode == NULL)
/* buffer doesn't belong to any of the given relfilelocators; skip it */
if (rlocator == NULL)
continue;
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, (*rnode)))
if (RelFileLocatorEquals(bufHdr->tag.rlocator, (*rlocator)))
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
}
pfree(nodes);
pfree(locators);
pfree(rels);
}
/* ---------------------------------------------------------------------
* FindAndDropRelFileNodeBuffers
* FindAndDropRelFileLocatorBuffers
*
* This function performs look up in BufMapping table and removes from the
* buffer pool all the pages of the specified relation fork that has block
@ -3340,9 +3340,9 @@ DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
* --------------------------------------------------------------------
*/
static void
FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock)
FindAndDropRelFileLocatorBuffers(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber nForkBlock,
BlockNumber firstDelBlock)
{
BlockNumber curBlock;
@ -3356,7 +3356,7 @@ FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
uint32 buf_state;
/* create a tag so we can lookup the buffer */
INIT_BUFFERTAG(bufTag, rnode, forkNum, curBlock);
INIT_BUFFERTAG(bufTag, rlocator, forkNum, curBlock);
/* determine its hash code and partition lock ID */
bufHash = BufTableHashCode(&bufTag);
@ -3380,7 +3380,7 @@ FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
*/
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
bufHdr->tag.forkNum == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
InvalidateBuffer(bufHdr); /* releases spinlock */
@ -3397,7 +3397,7 @@ FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
* bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
* tree no longer exists. Implementation is pretty similar to
* DropRelFileNodeBuffers() which is for destroying just one relation.
* DropRelFileLocatorBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
void
@ -3416,14 +3416,14 @@ DropDatabaseBuffers(Oid dbid)
uint32 buf_state;
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (bufHdr->tag.rnode.dbNode != dbid)
if (bufHdr->tag.rlocator.dbOid != dbid)
continue;
buf_state = LockBufHdr(bufHdr);
if (bufHdr->tag.rnode.dbNode == dbid)
if (bufHdr->tag.rlocator.dbOid == dbid)
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
@ -3453,7 +3453,7 @@ PrintBufferDescs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
relpathbackend(buf->tag.rnode, InvalidBackendId, buf->tag.forkNum),
relpathbackend(buf->tag.rlocator, InvalidBackendId, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
@ -3478,7 +3478,7 @@ PrintPinnedBufs(void)
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
relpathperm(buf->tag.rnode, buf->tag.forkNum),
relpathperm(buf->tag.rlocator, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
@ -3517,7 +3517,7 @@ FlushRelationBuffers(Relation rel)
uint32 buf_state;
bufHdr = GetLocalBufferDescriptor(i);
if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
(BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
@ -3561,16 +3561,16 @@ FlushRelationBuffers(Relation rel)
bufHdr = GetBufferDescriptor(i);
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator))
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@ -3608,21 +3608,21 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
for (i = 0; i < nrels; i++)
{
Assert(!RelFileNodeBackendIsTemp(smgrs[i]->smgr_rnode));
Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
srels[i].rnode = smgrs[i]->smgr_rnode.node;
srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
srels[i].srel = smgrs[i];
}
/*
* Save the bsearch overhead for low number of relations to sync. See
* DropRelFileNodesAllBuffers for details.
* DropRelFileLocatorsAllBuffers for details.
*/
use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
/* sort the list of SMgrRelations if necessary */
if (use_bsearch)
pg_qsort(srels, nrels, sizeof(SMgrSortArray), rnode_comparator);
pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
/* Make sure we can handle the pin inside the loop */
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
@ -3634,8 +3634,8 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
uint32 buf_state;
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (!use_bsearch)
@ -3644,7 +3644,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
for (j = 0; j < nrels; j++)
{
if (RelFileNodeEquals(bufHdr->tag.rnode, srels[j].rnode))
if (RelFileLocatorEquals(bufHdr->tag.rlocator, srels[j].rlocator))
{
srelent = &srels[j];
break;
@ -3653,19 +3653,19 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
}
else
{
srelent = bsearch((const void *) &(bufHdr->tag.rnode),
srelent = bsearch((const void *) &(bufHdr->tag.rlocator),
srels, nrels, sizeof(SMgrSortArray),
rnode_comparator);
rlocator_comparator);
}
/* buffer doesn't belong to any of the given relfilenodes; skip it */
/* buffer doesn't belong to any of the given relfilelocators; skip it */
if (srelent == NULL)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
if (RelFileNodeEquals(bufHdr->tag.rnode, srelent->rnode) &&
if (RelFileLocatorEquals(bufHdr->tag.rlocator, srelent->rlocator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@ -3729,7 +3729,7 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
CHECK_FOR_INTERRUPTS();
/* Read block from source relation. */
srcBuf = ReadBufferWithoutRelcache(src->rd_node, forkNum, blkno,
srcBuf = ReadBufferWithoutRelcache(src->rd_locator, forkNum, blkno,
RBM_NORMAL, bstrategy_src,
permanent);
srcPage = BufferGetPage(srcBuf);
@ -3740,7 +3740,7 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
}
/* Use P_NEW to extend the destination relation. */
dstBuf = ReadBufferWithoutRelcache(dst->rd_node, forkNum, P_NEW,
dstBuf = ReadBufferWithoutRelcache(dst->rd_locator, forkNum, P_NEW,
RBM_NORMAL, bstrategy_dst,
permanent);
LockBuffer(dstBuf, BUFFER_LOCK_EXCLUSIVE);
@ -3775,8 +3775,8 @@ RelationCopyStorageUsingBuffer(Relation src, Relation dst, ForkNumber forkNum,
* --------------------------------------------------------------------
*/
void
CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
bool permanent)
CreateAndCopyRelationData(RelFileLocator src_rlocator,
RelFileLocator dst_rlocator, bool permanent)
{
Relation src_rel;
Relation dst_rel;
@ -3793,8 +3793,8 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
* used the smgr layer directly, we would have to worry about
* invalidations.
*/
src_rel = CreateFakeRelcacheEntry(src_rnode);
dst_rel = CreateFakeRelcacheEntry(dst_rnode);
src_rel = CreateFakeRelcacheEntry(src_rlocator);
dst_rel = CreateFakeRelcacheEntry(dst_rlocator);
/*
* Create and copy all forks of the relation. During create database we
@ -3802,7 +3802,7 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
* directory. Therefore, each individual relation doesn't need to be
* registered for cleanup.
*/
RelationCreateStorage(dst_rnode, relpersistence, false);
RelationCreateStorage(dst_rlocator, relpersistence, false);
/* copy main fork. */
RelationCopyStorageUsingBuffer(src_rel, dst_rel, MAIN_FORKNUM, permanent);
@ -3820,7 +3820,7 @@ CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
* init fork of an unlogged relation.
*/
if (permanent || forkNum == INIT_FORKNUM)
log_smgrcreate(&dst_rnode, forkNum);
log_smgrcreate(&dst_rlocator, forkNum);
/* Copy a fork's data, block by block. */
RelationCopyStorageUsingBuffer(src_rel, dst_rel, forkNum,
@ -3864,16 +3864,16 @@ FlushDatabaseBuffers(Oid dbid)
bufHdr = GetBufferDescriptor(i);
/*
* As in DropRelFileNodeBuffers, an unlocked precheck should be safe
* and saves some cycles.
* As in DropRelFileLocatorBuffers, an unlocked precheck should be
* safe and saves some cycles.
*/
if (bufHdr->tag.rnode.dbNode != dbid)
if (bufHdr->tag.rlocator.dbOid != dbid)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
if (bufHdr->tag.rnode.dbNode == dbid &&
if (bufHdr->tag.rlocator.dbOid == dbid &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
@ -4034,7 +4034,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
(pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
{
/*
* If we must not write WAL, due to a relfilenode-specific
* If we must not write WAL, due to a relfilelocator-specific
* condition or being in recovery, don't dirty the page. We can
* set the hint, just not dirty the page as a result so the hint
* is lost when we evict the page or shutdown.
@ -4042,7 +4042,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
* See src/backend/storage/page/README for longer discussion.
*/
if (RecoveryInProgress() ||
RelFileNodeSkippingWAL(bufHdr->tag.rnode))
RelFileLocatorSkippingWAL(bufHdr->tag.rlocator))
return;
/*
@ -4651,7 +4651,7 @@ AbortBufferIO(void)
/* Buffer is pinned, so we can read tag without spinlock */
char *path;
path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
path = relpathperm(buf->tag.rlocator, buf->tag.forkNum);
ereport(WARNING,
(errcode(ERRCODE_IO_ERROR),
errmsg("could not write block %u of %s",
@ -4675,7 +4675,7 @@ shared_buffer_write_error_callback(void *arg)
/* Buffer is pinned, so we can read the tag without locking the spinlock */
if (bufHdr != NULL)
{
char *path = relpathperm(bufHdr->tag.rnode, bufHdr->tag.forkNum);
char *path = relpathperm(bufHdr->tag.rlocator, bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
@ -4693,7 +4693,7 @@ local_buffer_write_error_callback(void *arg)
if (bufHdr != NULL)
{
char *path = relpathbackend(bufHdr->tag.rnode, MyBackendId,
char *path = relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
@ -4703,27 +4703,27 @@ local_buffer_write_error_callback(void *arg)
}
/*
* RelFileNode qsort/bsearch comparator; see RelFileNodeEquals.
* RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
*/
static int
rnode_comparator(const void *p1, const void *p2)
rlocator_comparator(const void *p1, const void *p2)
{
RelFileNode n1 = *(const RelFileNode *) p1;
RelFileNode n2 = *(const RelFileNode *) p2;
RelFileLocator n1 = *(const RelFileLocator *) p1;
RelFileLocator n2 = *(const RelFileLocator *) p2;
if (n1.relNode < n2.relNode)
if (n1.relNumber < n2.relNumber)
return -1;
else if (n1.relNode > n2.relNode)
else if (n1.relNumber > n2.relNumber)
return 1;
if (n1.dbNode < n2.dbNode)
if (n1.dbOid < n2.dbOid)
return -1;
else if (n1.dbNode > n2.dbNode)
else if (n1.dbOid > n2.dbOid)
return 1;
if (n1.spcNode < n2.spcNode)
if (n1.spcOid < n2.spcOid)
return -1;
else if (n1.spcNode > n2.spcNode)
else if (n1.spcOid > n2.spcOid)
return 1;
else
return 0;
@ -4789,7 +4789,7 @@ buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
{
int ret;
ret = rnode_comparator(&ba->rnode, &bb->rnode);
ret = rlocator_comparator(&ba->rlocator, &bb->rlocator);
if (ret != 0)
return ret;
@ -4822,9 +4822,9 @@ ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
else if (a->tsId > b->tsId)
return 1;
/* compare relation */
if (a->relNode < b->relNode)
if (a->relNumber < b->relNumber)
return -1;
else if (a->relNode > b->relNode)
else if (a->relNumber > b->relNumber)
return 1;
/* compare fork */
else if (a->forkNum < b->forkNum)
@ -4960,7 +4960,7 @@ IssuePendingWritebacks(WritebackContext *context)
next = &context->pending_writebacks[i + ahead + 1];
/* different file, stop */
if (!RelFileNodeEquals(cur->tag.rnode, next->tag.rnode) ||
if (!RelFileLocatorEquals(cur->tag.rlocator, next->tag.rlocator) ||
cur->tag.forkNum != next->tag.forkNum)
break;
@ -4979,7 +4979,7 @@ IssuePendingWritebacks(WritebackContext *context)
i += ahead;
/* and finally tell the kernel to write the data to storage */
reln = smgropen(tag.rnode, InvalidBackendId);
reln = smgropen(tag.rlocator, InvalidBackendId);
smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
}

View File

@ -68,7 +68,7 @@ PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum,
BufferTag newTag; /* identity of requested block */
LocalBufferLookupEnt *hresult;
INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
@ -117,7 +117,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
bool found;
uint32 buf_state;
INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
@ -134,7 +134,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
smgr->smgr_rnode.node.relNode, forkNum, blockNum, -b - 1);
smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum, -b - 1);
#endif
buf_state = pg_atomic_read_u32(&bufHdr->state);
@ -162,7 +162,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
smgr->smgr_rnode.node.relNode, forkNum, blockNum,
smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum,
-nextFreeLocalBuf - 1);
#endif
@ -215,7 +215,7 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
/* Find smgr relation for buffer */
oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
oreln = smgropen(bufHdr->tag.rlocator, MyBackendId);
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
@ -312,7 +312,7 @@ MarkLocalBufferDirty(Buffer buffer)
}
/*
* DropRelFileNodeLocalBuffers
* DropRelFileLocatorLocalBuffers
* This function removes from the buffer pool all the pages of the
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
@ -320,11 +320,11 @@ MarkLocalBufferDirty(Buffer buffer)
* out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
* See DropRelFileNodeBuffers in bufmgr.c for more notes.
* See DropRelFileLocatorBuffers in bufmgr.c for more notes.
*/
void
DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
BlockNumber firstDelBlock)
DropRelFileLocatorLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber firstDelBlock)
{
int i;
@ -337,14 +337,14 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
bufHdr->tag.forkNum == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
relpathbackend(bufHdr->tag.rnode, MyBackendId,
relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum),
LocalRefCount[i]);
/* Remove entry from hashtable */
@ -363,14 +363,14 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
}
/*
* DropRelFileNodeAllLocalBuffers
* DropRelFileLocatorAllLocalBuffers
* This function removes from the buffer pool all pages of all forks
* of the specified relation.
*
* See DropRelFileNodesAllBuffers in bufmgr.c for more notes.
* See DropRelFileLocatorsAllBuffers in bufmgr.c for more notes.
*/
void
DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
DropRelFileLocatorAllLocalBuffers(RelFileLocator rlocator)
{
int i;
@ -383,12 +383,12 @@ DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
RelFileNodeEquals(bufHdr->tag.rnode, rnode))
RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator))
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
relpathbackend(bufHdr->tag.rnode, MyBackendId,
relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum),
LocalRefCount[i]);
/* Remove entry from hashtable */
@ -589,8 +589,8 @@ AtProcExit_LocalBuffers(void)
{
/*
* We shouldn't be holding any remaining pins; if we are, and assertions
* aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
* to drop the temp rels.
* aren't enabled, we'll fail later in DropRelFileLocatorBuffers while
* trying to drop the temp rels.
*/
CheckForLocalBufferLeaks();
}

View File

@ -196,7 +196,7 @@ RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
* WAL replay
*/
void
XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk,
Size spaceAvail)
{
int new_cat = fsm_space_avail_to_cat(spaceAvail);
@ -211,8 +211,8 @@ XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
blkno = fsm_logical_to_physical(addr);
/* If the page doesn't exist already, extend */
buf = XLogReadBufferExtended(rnode, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR,
InvalidBuffer);
buf = XLogReadBufferExtended(rlocator, FSM_FORKNUM, blkno,
RBM_ZERO_ON_ERROR, InvalidBuffer);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(buf);

View File

@ -268,13 +268,13 @@ restart:
*
* Fix the corruption and restart.
*/
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blknum;
BufferGetTag(buf, &rnode, &forknum, &blknum);
BufferGetTag(buf, &rlocator, &forknum, &blknum);
elog(DEBUG1, "fixing corrupt FSM block %u, relation %u/%u/%u",
blknum, rnode.spcNode, rnode.dbNode, rnode.relNode);
blknum, rlocator.spcOid, rlocator.dbOid, rlocator.relNumber);
/* make sure we hold an exclusive lock */
if (!exclusive_lock_held)

View File

@ -442,7 +442,7 @@ ResolveRecoveryConflictWithVirtualXIDs(VirtualTransactionId *waitlist,
}
void
ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode node)
ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocator locator)
{
VirtualTransactionId *backends;
@ -461,7 +461,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode
return;
backends = GetConflictingVirtualXIDs(latestRemovedXid,
node.dbNode);
locator.dbOid);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_SNAPSHOT,
@ -475,7 +475,7 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode
*/
void
ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid,
RelFileNode node)
RelFileLocator locator)
{
/*
* ResolveRecoveryConflictWithSnapshot operates on 32-bit TransactionIds,
@ -493,7 +493,7 @@ ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXi
TransactionId latestRemovedXid;
latestRemovedXid = XidFromFullTransactionId(latestRemovedFullXid);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, node);
ResolveRecoveryConflictWithSnapshot(latestRemovedXid, locator);
}
}
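For context, a hedged sketch of a redo-side caller (record and xlrec are assumptions here, mirroring how index-deletion replay reaches this function): the locator is pulled from the WAL record's block reference, and only its dbOid matters for the conflict scan.
	RelFileLocator locator;
	/* hypothetical: block 0 of the WAL record carries the affected relation */
	XLogRecGetBlockTag(record, 0, &locator, NULL, NULL);
	ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, locator);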

View File

@ -1997,7 +1997,7 @@ PageIsPredicateLocked(Relation relation, BlockNumber blkno)
PREDICATELOCKTARGET *target;
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
blkno);
@ -2576,7 +2576,7 @@ PredicateLockRelation(Relation relation, Snapshot snapshot)
return;
SET_PREDICATELOCKTARGETTAG_RELATION(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id);
PredicateLockAcquire(&tag);
}
@ -2599,7 +2599,7 @@ PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
return;
SET_PREDICATELOCKTARGETTAG_PAGE(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
blkno);
PredicateLockAcquire(&tag);
@ -2638,13 +2638,13 @@ PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot,
* level lock.
*/
SET_PREDICATELOCKTARGETTAG_RELATION(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id);
if (PredicateLockExists(&tag))
return;
SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
@ -2974,7 +2974,7 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
if (!PredicateLockingNeededForRelation(relation))
return;
dbId = relation->rd_node.dbNode;
dbId = relation->rd_locator.dbOid;
relId = relation->rd_id;
if (relation->rd_index == NULL)
{
@ -3194,11 +3194,11 @@ PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
Assert(BlockNumberIsValid(newblkno));
SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
oldblkno);
SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
newblkno);
@ -4478,7 +4478,7 @@ CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber b
if (tid != NULL)
{
SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
@ -4488,14 +4488,14 @@ CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber b
if (blkno != InvalidBlockNumber)
{
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id,
blkno);
CheckTargetForConflictsIn(&targettag);
}
SET_PREDICATELOCKTARGETTAG_RELATION(targettag,
relation->rd_node.dbNode,
relation->rd_locator.dbOid,
relation->rd_id);
CheckTargetForConflictsIn(&targettag);
}
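As the calls above show, predicate-lock target tags are scoped by database OID plus relation OID; a small hedged sketch of building a page-level tag by hand (hypothetical variables, following the same pattern as PredicateLockPage):
	PREDICATELOCKTARGETTAG tag;
	SET_PREDICATELOCKTARGETTAG_PAGE(tag,
									relation->rd_locator.dbOid,
									RelationGetRelid(relation),
									blkno);
	PredicateLockAcquire(&tag);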
@ -4556,7 +4556,7 @@ CheckTableForSerializableConflictIn(Relation relation)
Assert(relation->rd_index == NULL); /* not an index relation */
dbId = relation->rd_node.dbNode;
dbId = relation->rd_locator.dbOid;
heapId = relation->rd_id;
LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);

View File

@ -46,7 +46,7 @@ physical relation in system catalogs.
It is assumed that the main fork, fork number 0 or MAIN_FORKNUM, always
exists. Fork numbers are assigned in src/include/common/relpath.h.
Functions in smgr.c and md.c take an extra fork number argument, in addition
to relfilenode and block number, to identify which relation fork you want to
to relfilelocator and block number, to identify which relation fork you want to
access. Since most code wants to access the main fork, a shortcut version of
ReadBuffer that accesses MAIN_FORKNUM is provided in the buffer manager for
convenience.
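As an illustration, a minimal sketch of such a call, assuming an already-open
SMgrRelation reln; the fork number argument selects which of the relation's
physical files is read:

    char    buf[BLCKSZ];

    /* read block 0 of the relation's free-space-map fork */
    smgrread(reln, FSM_FORKNUM, 0, buf);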

View File

@ -35,7 +35,7 @@
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/md.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
#include "storage/smgr.h"
#include "storage/sync.h"
#include "utils/hsearch.h"
@ -89,11 +89,11 @@ static MemoryContext MdCxt; /* context for all MdfdVec objects */
/* Populate a file tag describing an md.c segment file. */
#define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \
#define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \
( \
memset(&(a), 0, sizeof(FileTag)), \
(a).handler = SYNC_HANDLER_MD, \
(a).rnode = (xx_rnode), \
(a).rlocator = (xx_rlocator), \
(a).forknum = (xx_forknum), \
(a).segno = (xx_segno) \
)
@ -121,14 +121,14 @@ static MemoryContext MdCxt; /* context for all MdfdVec objects */
/* local routines */
static void mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum,
static void mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forkNum,
bool isRedo);
static MdfdVec *mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior);
static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
MdfdVec *seg);
static void register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
static void register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno);
static void register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
static void register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno);
static void _fdvec_resize(SMgrRelation reln,
ForkNumber forknum,
@ -199,11 +199,11 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* should be here and not in commands/tablespace.c? But that would imply
* importing a lot of stuff that smgr.c oughtn't know, either.
*/
TablespaceCreateDbspace(reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
TablespaceCreateDbspace(reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
isRedo);
path = relpath(reln->smgr_rnode, forkNum);
path = relpath(reln->smgr_rlocator, forkNum);
fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY);
@ -234,7 +234,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
/*
* mdunlink() -- Unlink a relation.
*
* Note that we're passed a RelFileNodeBackend --- by the time this is called,
* Note that we're passed a RelFileLocatorBackend --- by the time this is called,
* there won't be an SMgrRelation hashtable entry anymore.
*
* forkNum can be a fork number to delete a specific fork, or InvalidForkNumber
@ -243,10 +243,10 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* For regular relations, we don't unlink the first segment file of the rel,
* but just truncate it to zero length, and record a request to unlink it after
* the next checkpoint. Additional segments can be unlinked immediately,
* however. Leaving the empty file in place prevents that relfilenode
* number from being reused. The scenario this protects us from is:
* however. Leaving the empty file in place prevents that relfilenumber
* from being reused. The scenario this protects us from is:
* 1. We delete a relation (and commit, and actually remove its file).
* 2. We create a new relation, which by chance gets the same relfilenode as
* 2. We create a new relation, which by chance gets the same relfilenumber as
* the just-deleted one (OIDs must've wrapped around for that to happen).
* 3. We crash before another checkpoint occurs.
* During replay, we would delete the file and then recreate it, which is fine
@ -254,18 +254,18 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as we do at wal_level=minimal), the contents of
* the file would be lost forever. By leaving the empty file until after the
* next checkpoint, we prevent reassignment of the relfilenode number until
* it's safe, because relfilenode assignment skips over any existing file.
* next checkpoint, we prevent reassignment of the relfilenumber until it's
* safe, because relfilenumber assignment skips over any existing file.
*
* We do not need to go through this dance for temp relations, though, because
* we never make WAL entries for temp rels, and so a temp rel poses no threat
* to the health of a regular rel that has taken over its relfilenode number.
* to the health of a regular rel that has taken over its relfilenumber.
* The fact that temp rels and regular rels have different file naming
* patterns provides additional safety.
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
* relfilenode number from being recycled. Also, we do not carefully
* relfilenumber from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
@ -278,16 +278,16 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
* we are usually not in a transaction anymore when this is called.
*/
void
mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
mdunlink(RelFileLocatorBackend rlocator, ForkNumber forkNum, bool isRedo)
{
/* Now do the per-fork work */
if (forkNum == InvalidForkNumber)
{
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
mdunlinkfork(rnode, forkNum, isRedo);
mdunlinkfork(rlocator, forkNum, isRedo);
}
else
mdunlinkfork(rnode, forkNum, isRedo);
mdunlinkfork(rlocator, forkNum, isRedo);
}
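To make the reuse hazard described above concrete, here is a hypothetical helper (an illustration, not code from this commit) of the probe that relfilenumber assignment performs; the zero-length tombstone left behind by mdunlink makes access() succeed, so the number is skipped:
static bool
relfilenumber_in_use(RelFileLocator rlocator)
{
	char	   *path = relpathperm(rlocator, MAIN_FORKNUM);
	bool		in_use = (access(path, F_OK) == 0); /* tombstone counts too */
	pfree(path);
	return in_use;
}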
/*
@ -315,25 +315,25 @@ do_truncate(const char *path)
}
static void
mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forkNum, bool isRedo)
{
char *path;
int ret;
path = relpath(rnode, forkNum);
path = relpath(rlocator, forkNum);
/*
* Delete or truncate the first segment.
*/
if (isRedo || forkNum != MAIN_FORKNUM || RelFileNodeBackendIsTemp(rnode))
if (isRedo || forkNum != MAIN_FORKNUM || RelFileLocatorBackendIsTemp(rlocator))
{
if (!RelFileNodeBackendIsTemp(rnode))
if (!RelFileLocatorBackendIsTemp(rlocator))
{
/* Prevent other backends' fds from holding on to the disk space */
ret = do_truncate(path);
/* Forget any pending sync requests for the first segment */
register_forget_request(rnode, forkNum, 0 /* first seg */ );
register_forget_request(rlocator, forkNum, 0 /* first seg */ );
}
else
ret = 0;
@ -354,7 +354,7 @@ mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
ret = do_truncate(path);
/* Register request to unlink first segment later */
register_unlink_segment(rnode, forkNum, 0 /* first seg */ );
register_unlink_segment(rlocator, forkNum, 0 /* first seg */ );
}
/*
@ -373,7 +373,7 @@ mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
{
sprintf(segpath, "%s.%u", path, segno);
if (!RelFileNodeBackendIsTemp(rnode))
if (!RelFileLocatorBackendIsTemp(rlocator))
{
/*
* Prevent other backends' fds from holding on to the disk
@ -386,7 +386,7 @@ mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
* Forget any pending sync requests for this segment before we
* try to unlink.
*/
register_forget_request(rnode, forkNum, segno);
register_forget_request(rlocator, forkNum, segno);
}
if (unlink(segpath) < 0)
@ -437,7 +437,7 @@ mdextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("cannot extend file \"%s\" beyond %u blocks",
relpath(reln->smgr_rnode, forknum),
relpath(reln->smgr_rlocator, forknum),
InvalidBlockNumber)));
v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);
@ -490,7 +490,7 @@ mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior)
if (reln->md_num_open_segs[forknum] > 0)
return &reln->md_seg_fds[forknum][0];
path = relpath(reln->smgr_rnode, forknum);
path = relpath(reln->smgr_rlocator, forknum);
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY);
@ -645,10 +645,10 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
MdfdVec *v;
TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend);
v = _mdfd_getseg(reln, forknum, blocknum, false,
EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
@ -660,10 +660,10 @@ mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_READ);
TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend,
nbytes,
BLCKSZ);
@ -715,10 +715,10 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
#endif
TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend);
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend);
v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
@ -730,10 +730,10 @@ mdwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE);
TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
reln->smgr_rnode.node.spcNode,
reln->smgr_rnode.node.dbNode,
reln->smgr_rnode.node.relNode,
reln->smgr_rnode.backend,
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
reln->smgr_rlocator.backend,
nbytes,
BLCKSZ);
@ -842,7 +842,7 @@ mdtruncate(SMgrRelation reln, ForkNumber forknum, BlockNumber nblocks)
return;
ereport(ERROR,
(errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
relpath(reln->smgr_rnode, forknum),
relpath(reln->smgr_rlocator, forknum),
nblocks, curnblk)));
}
if (nblocks == curnblk)
@ -983,7 +983,7 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
{
FileTag tag;
INIT_MD_FILETAG(tag, reln->smgr_rnode.node, forknum, seg->mdfd_segno);
INIT_MD_FILETAG(tag, reln->smgr_rlocator.locator, forknum, seg->mdfd_segno);
/* Temp relations should never be fsync'd */
Assert(!SmgrIsTemp(reln));
@ -1005,15 +1005,15 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
* register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
*/
static void
register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno)
{
FileTag tag;
INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
/* Should never be used with temp relations */
Assert(!RelFileNodeBackendIsTemp(rnode));
Assert(!RelFileLocatorBackendIsTemp(rlocator));
RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true /* retryOnError */ );
}
@ -1022,12 +1022,12 @@ register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
* register_forget_request() -- forget any fsyncs for a relation fork's segment
*/
static void
register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno)
{
FileTag tag;
INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */ );
}
@ -1039,13 +1039,13 @@ void
ForgetDatabaseSyncRequests(Oid dbid)
{
FileTag tag;
RelFileNode rnode;
RelFileLocator rlocator;
rnode.dbNode = dbid;
rnode.spcNode = 0;
rnode.relNode = 0;
rlocator.dbOid = dbid;
rlocator.spcOid = 0;
rlocator.relNumber = 0;
INIT_MD_FILETAG(tag, rnode, InvalidForkNumber, InvalidBlockNumber);
INIT_MD_FILETAG(tag, rlocator, InvalidForkNumber, InvalidBlockNumber);
RegisterSyncRequest(&tag, SYNC_FILTER_REQUEST, true /* retryOnError */ );
}
@ -1054,7 +1054,7 @@ ForgetDatabaseSyncRequests(Oid dbid)
* DropRelationFiles -- drop files of all given relations
*/
void
DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo)
DropRelationFiles(RelFileLocator *delrels, int ndelrels, bool isRedo)
{
SMgrRelation *srels;
int i;
@ -1129,7 +1129,7 @@ _mdfd_segpath(SMgrRelation reln, ForkNumber forknum, BlockNumber segno)
char *path,
*fullpath;
path = relpath(reln->smgr_rnode, forknum);
path = relpath(reln->smgr_rlocator, forknum);
if (segno > 0)
{
@ -1345,7 +1345,7 @@ _mdnblocks(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
int
mdsyncfiletag(const FileTag *ftag, char *path)
{
SMgrRelation reln = smgropen(ftag->rnode, InvalidBackendId);
SMgrRelation reln = smgropen(ftag->rlocator, InvalidBackendId);
File file;
bool need_to_close;
int result,
@ -1395,7 +1395,7 @@ mdunlinkfiletag(const FileTag *ftag, char *path)
char *p;
/* Compute the path. */
p = relpathperm(ftag->rnode, MAIN_FORKNUM);
p = relpathperm(ftag->rlocator, MAIN_FORKNUM);
strlcpy(path, p, MAXPGPATH);
pfree(p);
@ -1417,5 +1417,5 @@ mdfiletagmatches(const FileTag *ftag, const FileTag *candidate)
* We'll return true for all candidates that have the same database OID as
* the ftag from the SYNC_FILTER_REQUEST request, so they're forgotten.
*/
return ftag->rnode.dbNode == candidate->rnode.dbNode;
return ftag->rlocator.dbOid == candidate->rlocator.dbOid;
}

View File

@ -46,7 +46,7 @@ typedef struct f_smgr
void (*smgr_create) (SMgrRelation reln, ForkNumber forknum,
bool isRedo);
bool (*smgr_exists) (SMgrRelation reln, ForkNumber forknum);
void (*smgr_unlink) (RelFileNodeBackend rnode, ForkNumber forknum,
void (*smgr_unlink) (RelFileLocatorBackend rlocator, ForkNumber forknum,
bool isRedo);
void (*smgr_extend) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
@ -143,9 +143,9 @@ smgrshutdown(int code, Datum arg)
* This does not attempt to actually open the underlying file.
*/
SMgrRelation
smgropen(RelFileNode rnode, BackendId backend)
smgropen(RelFileLocator rlocator, BackendId backend)
{
RelFileNodeBackend brnode;
RelFileLocatorBackend brlocator;
SMgrRelation reln;
bool found;
@ -154,7 +154,7 @@ smgropen(RelFileNode rnode, BackendId backend)
/* First time through: initialize the hash table */
HASHCTL ctl;
ctl.keysize = sizeof(RelFileNodeBackend);
ctl.keysize = sizeof(RelFileLocatorBackend);
ctl.entrysize = sizeof(SMgrRelationData);
SMgrRelationHash = hash_create("smgr relation table", 400,
&ctl, HASH_ELEM | HASH_BLOBS);
@ -162,10 +162,10 @@ smgropen(RelFileNode rnode, BackendId backend)
}
/* Look up or create an entry */
brnode.node = rnode;
brnode.backend = backend;
brlocator.locator = rlocator;
brlocator.backend = backend;
reln = (SMgrRelation) hash_search(SMgrRelationHash,
(void *) &brnode,
(void *) &brlocator,
HASH_ENTER, &found);
/* Initialize it if not present before */
@ -267,7 +267,7 @@ smgrclose(SMgrRelation reln)
dlist_delete(&reln->node);
if (hash_search(SMgrRelationHash,
(void *) &(reln->smgr_rnode),
(void *) &(reln->smgr_rlocator),
HASH_REMOVE, NULL) == NULL)
elog(ERROR, "SMgrRelation hashtable corrupted");
@ -335,15 +335,15 @@ smgrcloseall(void)
}
/*
* smgrclosenode() -- Close SMgrRelation object for given RelFileNode,
* smgrcloserellocator() -- Close SMgrRelation object for given RelFileLocator,
* if one exists.
*
* This has the same effects as smgrclose(smgropen(rnode)), but it avoids
* This has the same effects as smgrclose(smgropen(rlocator)), but it avoids
* uselessly creating a hashtable entry only to drop it again when no
* such entry exists already.
*/
void
smgrclosenode(RelFileNodeBackend rnode)
smgrcloserellocator(RelFileLocatorBackend rlocator)
{
SMgrRelation reln;
@ -352,7 +352,7 @@ smgrclosenode(RelFileNodeBackend rnode)
return;
reln = (SMgrRelation) hash_search(SMgrRelationHash,
(void *) &rnode,
(void *) &rlocator,
HASH_FIND, NULL);
if (reln != NULL)
smgrclose(reln);
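For orientation, a hedged caller sketch pairing with the above: smgropen() merely creates or finds the hash entry for a locator/backend pair, after which operations such as size queries go through the same handle (variable names hypothetical):
	SMgrRelation reln = smgropen(tag.rlocator, InvalidBackendId);
	BlockNumber nblocks = smgrnblocks(reln, MAIN_FORKNUM);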
@ -420,7 +420,7 @@ void
smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
{
int i = 0;
RelFileNodeBackend *rnodes;
RelFileLocatorBackend *rlocators;
ForkNumber forknum;
if (nrels == 0)
@ -430,19 +430,19 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
* Get rid of any remaining buffers for the relations. bufmgr will just
* drop them without bothering to write the contents.
*/
DropRelFileNodesAllBuffers(rels, nrels);
DropRelFileLocatorsAllBuffers(rels, nrels);
/*
* create an array which contains all relations to be dropped, and close
* each relation's forks at the smgr level while at it
*/
rnodes = palloc(sizeof(RelFileNodeBackend) * nrels);
rlocators = palloc(sizeof(RelFileLocatorBackend) * nrels);
for (i = 0; i < nrels; i++)
{
RelFileNodeBackend rnode = rels[i]->smgr_rnode;
RelFileLocatorBackend rlocator = rels[i]->smgr_rlocator;
int which = rels[i]->smgr_which;
rnodes[i] = rnode;
rlocators[i] = rlocator;
/* Close the forks at smgr level */
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
@ -458,7 +458,7 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
* closed our own smgr rel.
*/
for (i = 0; i < nrels; i++)
CacheInvalidateSmgr(rnodes[i]);
CacheInvalidateSmgr(rlocators[i]);
/*
* Delete the physical file(s).
@ -473,10 +473,10 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
int which = rels[i]->smgr_which;
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
smgrsw[which].smgr_unlink(rnodes[i], forknum, isRedo);
smgrsw[which].smgr_unlink(rlocators[i], forknum, isRedo);
}
pfree(rnodes);
pfree(rlocators);
}
@ -631,7 +631,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
* just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(reln, forknum, nforks, nblocks);
DropRelFileLocatorBuffers(reln, forknum, nforks, nblocks);
/*
* Send a shared-inval message to force other backends to close any smgr
@ -643,7 +643,7 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
* is a performance-critical path.) As in the unlink code, we want to be
* sure the message is sent before we start changing things on-disk.
*/
CacheInvalidateSmgr(reln->smgr_rnode);
CacheInvalidateSmgr(reln->smgr_rlocator);
/* Do the truncation */
for (i = 0; i < nforks; i++)

View File

@ -27,7 +27,7 @@
#include "utils/builtins.h"
#include "utils/numeric.h"
#include "utils/rel.h"
#include "utils/relfilenodemap.h"
#include "utils/relfilenumbermap.h"
#include "utils/relmapper.h"
#include "utils/syscache.h"
@ -292,7 +292,7 @@ pg_tablespace_size_name(PG_FUNCTION_ARGS)
* is no check here or at the call sites for that.
*/
static int64
calculate_relation_size(RelFileNode *rfn, BackendId backend, ForkNumber forknum)
calculate_relation_size(RelFileLocator *rfn, BackendId backend, ForkNumber forknum)
{
int64 totalsize = 0;
char *relationpath;
@ -349,7 +349,7 @@ pg_relation_size(PG_FUNCTION_ARGS)
if (rel == NULL)
PG_RETURN_NULL();
size = calculate_relation_size(&(rel->rd_node), rel->rd_backend,
size = calculate_relation_size(&(rel->rd_locator), rel->rd_backend,
forkname_to_number(text_to_cstring(forkName)));
relation_close(rel, AccessShareLock);
@ -374,7 +374,7 @@ calculate_toast_table_size(Oid toastrelid)
/* toast heap size, including FSM and VM size */
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(toastRel->rd_node),
size += calculate_relation_size(&(toastRel->rd_locator),
toastRel->rd_backend, forkNum);
/* toast index size, including FSM and VM size */
@ -388,7 +388,7 @@ calculate_toast_table_size(Oid toastrelid)
toastIdxRel = relation_open(lfirst_oid(lc),
AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(toastIdxRel->rd_node),
size += calculate_relation_size(&(toastIdxRel->rd_locator),
toastIdxRel->rd_backend, forkNum);
relation_close(toastIdxRel, AccessShareLock);
@ -417,7 +417,7 @@ calculate_table_size(Relation rel)
* heap size, including FSM and VM
*/
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(rel->rd_node), rel->rd_backend,
size += calculate_relation_size(&(rel->rd_locator), rel->rd_backend,
forkNum);
/*
@ -456,7 +456,7 @@ calculate_indexes_size(Relation rel)
idxRel = relation_open(idxOid, AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
size += calculate_relation_size(&(idxRel->rd_node),
size += calculate_relation_size(&(idxRel->rd_locator),
idxRel->rd_backend,
forkNum);
@ -850,7 +850,7 @@ Datum
pg_relation_filenode(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
Oid result;
RelFileNumber result;
HeapTuple tuple;
Form_pg_class relform;
@ -864,29 +864,29 @@ pg_relation_filenode(PG_FUNCTION_ARGS)
if (relform->relfilenode)
result = relform->relfilenode;
else /* Consult the relation mapper */
result = RelationMapOidToFilenode(relid,
relform->relisshared);
result = RelationMapOidToFilenumber(relid,
relform->relisshared);
}
else
{
/* no storage, return NULL */
result = InvalidOid;
result = InvalidRelFileNumber;
}
ReleaseSysCache(tuple);
if (!OidIsValid(result))
if (!RelFileNumberIsValid(result))
PG_RETURN_NULL();
PG_RETURN_OID(result);
}
/*
* Get the relation via (reltablespace, relfilenode)
* Get the relation via (reltablespace, relfilenumber)
*
* This is expected to be used when somebody wants to match an individual file
* on the filesystem back to its table. That's not trivially possible via
* pg_class, because that doesn't contain the relfilenodes of shared and nailed
* pg_class, because that doesn't contain the relfilenumbers of shared and nailed
* tables.
*
* We don't fail but return NULL if we cannot find a mapping.
@ -898,14 +898,14 @@ Datum
pg_filenode_relation(PG_FUNCTION_ARGS)
{
Oid reltablespace = PG_GETARG_OID(0);
Oid relfilenode = PG_GETARG_OID(1);
RelFileNumber relfilenumber = PG_GETARG_OID(1);
Oid heaprel;
/* test needed so RelidByRelfilenode doesn't misbehave */
if (!OidIsValid(relfilenode))
/* test needed so RelidByRelfilenumber doesn't misbehave */
if (!RelFileNumberIsValid(relfilenumber))
PG_RETURN_NULL();
heaprel = RelidByRelfilenode(reltablespace, relfilenode);
heaprel = RelidByRelfilenumber(reltablespace, relfilenumber);
if (!OidIsValid(heaprel))
PG_RETURN_NULL();
@ -924,7 +924,7 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
Oid relid = PG_GETARG_OID(0);
HeapTuple tuple;
Form_pg_class relform;
RelFileNode rnode;
RelFileLocator rlocator;
BackendId backend;
char *path;
@ -937,29 +937,29 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
{
/* This logic should match RelationInitPhysicalAddr */
if (relform->reltablespace)
rnode.spcNode = relform->reltablespace;
rlocator.spcOid = relform->reltablespace;
else
rnode.spcNode = MyDatabaseTableSpace;
if (rnode.spcNode == GLOBALTABLESPACE_OID)
rnode.dbNode = InvalidOid;
rlocator.spcOid = MyDatabaseTableSpace;
if (rlocator.spcOid == GLOBALTABLESPACE_OID)
rlocator.dbOid = InvalidOid;
else
rnode.dbNode = MyDatabaseId;
rlocator.dbOid = MyDatabaseId;
if (relform->relfilenode)
rnode.relNode = relform->relfilenode;
rlocator.relNumber = relform->relfilenode;
else /* Consult the relation mapper */
rnode.relNode = RelationMapOidToFilenode(relid,
relform->relisshared);
rlocator.relNumber = RelationMapOidToFilenumber(relid,
relform->relisshared);
}
else
{
/* no storage, return NULL */
rnode.relNode = InvalidOid;
rlocator.relNumber = InvalidRelFileNumber;
/* some compilers generate warnings without these next two lines */
rnode.dbNode = InvalidOid;
rnode.spcNode = InvalidOid;
rlocator.dbOid = InvalidOid;
rlocator.spcOid = InvalidOid;
}
if (!OidIsValid(rnode.relNode))
if (!RelFileNumberIsValid(rlocator.relNumber))
{
ReleaseSysCache(tuple);
PG_RETURN_NULL();
@ -990,7 +990,7 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
ReleaseSysCache(tuple);
path = relpathbackend(rnode, backend, MAIN_FORKNUM);
path = relpathbackend(rlocator, backend, MAIN_FORKNUM);
PG_RETURN_TEXT_P(cstring_to_text(path));
}
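
The InvalidRelFileNumber / RelFileNumberIsValid tests used throughout this file suggest definitions along these lines; this is a hedged reconstruction rather than a quote of the header, though it is consistent with RelFileNumber currently being an alias for Oid:

typedef Oid RelFileNumber;

#define InvalidRelFileNumber        ((RelFileNumber) InvalidOid)
#define RelFileNumberIsValid(rfn)   ((bool) ((rfn) != InvalidRelFileNumber))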

View File

@ -2,7 +2,7 @@
* pg_upgrade_support.c
*
* server-side functions to set backend global variables
* to control oid and relfilenode assignment, and do other special
* to control oid and relfilenumber assignment, and do other special
* hacks needed for pg_upgrade.
*
* Copyright (c) 2010-2022, PostgreSQL Global Development Group
@ -98,10 +98,10 @@ binary_upgrade_set_next_heap_pg_class_oid(PG_FUNCTION_ARGS)
Datum
binary_upgrade_set_next_heap_relfilenode(PG_FUNCTION_ARGS)
{
Oid nodeoid = PG_GETARG_OID(0);
RelFileNumber relfilenumber = PG_GETARG_OID(0);
CHECK_IS_BINARY_UPGRADE;
binary_upgrade_next_heap_pg_class_relfilenode = nodeoid;
binary_upgrade_next_heap_pg_class_relfilenumber = relfilenumber;
PG_RETURN_VOID();
}
@ -120,10 +120,10 @@ binary_upgrade_set_next_index_pg_class_oid(PG_FUNCTION_ARGS)
Datum
binary_upgrade_set_next_index_relfilenode(PG_FUNCTION_ARGS)
{
Oid nodeoid = PG_GETARG_OID(0);
RelFileNumber relfilenumber = PG_GETARG_OID(0);
CHECK_IS_BINARY_UPGRADE;
binary_upgrade_next_index_pg_class_relfilenode = nodeoid;
binary_upgrade_next_index_pg_class_relfilenumber = relfilenumber;
PG_RETURN_VOID();
}
@ -142,10 +142,10 @@ binary_upgrade_set_next_toast_pg_class_oid(PG_FUNCTION_ARGS)
Datum
binary_upgrade_set_next_toast_relfilenode(PG_FUNCTION_ARGS)
{
Oid nodeoid = PG_GETARG_OID(0);
RelFileNumber relfilenumber = PG_GETARG_OID(0);
CHECK_IS_BINARY_UPGRADE;
binary_upgrade_next_toast_pg_class_relfilenode = nodeoid;
binary_upgrade_next_toast_pg_class_relfilenumber = relfilenumber;
PG_RETURN_VOID();
}
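
These setters are driven from SQL by pg_dump --binary-upgrade (see its hunks later in this commit); the emitted script contains statements like SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('16384'::pg_catalog.oid); with 16384 standing in for a real value.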

View File

@ -21,7 +21,7 @@ OBJS = \
partcache.o \
plancache.o \
relcache.o \
relfilenodemap.o \
relfilenumbermap.o \
relmapper.o \
spccache.o \
syscache.o \

View File

@ -661,11 +661,11 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
* We could have smgr entries for relations of other databases, so no
* short-circuit test is possible here.
*/
RelFileNodeBackend rnode;
RelFileLocatorBackend rlocator;
rnode.node = msg->sm.rnode;
rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
smgrclosenode(rnode);
rlocator.locator = msg->sm.rlocator;
rlocator.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
smgrcloserellocator(rlocator);
}
else if (msg->id == SHAREDINVALRELMAP_ID)
{
@ -1459,14 +1459,14 @@ CacheInvalidateRelcacheByRelid(Oid relid)
* Thus, the maximum possible backend ID is 2^23-1.
*/
void
CacheInvalidateSmgr(RelFileNodeBackend rnode)
CacheInvalidateSmgr(RelFileLocatorBackend rlocator)
{
SharedInvalidationMessage msg;
msg.sm.id = SHAREDINVALSMGR_ID;
msg.sm.backend_hi = rnode.backend >> 16;
msg.sm.backend_lo = rnode.backend & 0xffff;
msg.sm.rnode = rnode.node;
msg.sm.backend_hi = rlocator.backend >> 16;
msg.sm.backend_lo = rlocator.backend & 0xffff;
msg.sm.rlocator = rlocator.locator;
/* check AddCatcacheInvalidationMessage() for an explanation */
VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
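
As a sanity check on the hi/lo split, here is an illustrative round trip (the consumer side is LocalExecuteInvalidationMessage, shown earlier in this file); the 8-bit and 16-bit field widths are assumed from the 2^23-1 limit stated above:

static inline bool
backend_id_roundtrip(int backend)
{
    int8    hi = backend >> 16;
    uint16  lo = backend & 0xffff;

    /* must reassemble exactly as the invalidation consumer does */
    return ((hi << 16) | (int) lo) == backend;  /* true for 0 <= id < 2^23 */
}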

View File

@ -369,7 +369,7 @@ ScanPgRelation(Oid targetRelId, bool indexOK, bool force_non_historic)
/*
* The caller might need a tuple that's newer than the one visible to the
* historic snapshot; currently the only case requiring this is looking up the
* relfilenode of non mapped system relations during decoding. That
* relfilenumber of non mapped system relations during decoding. That
* snapshot can't change in the midst of a relcache build, so there's no
* need to register the snapshot.
*/
@ -1133,8 +1133,8 @@ retry:
relation->rd_refcnt = 0;
relation->rd_isnailed = false;
relation->rd_createSubid = InvalidSubTransactionId;
relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
switch (relation->rd_rel->relpersistence)
{
@ -1300,7 +1300,7 @@ retry:
}
/*
* Initialize the physical addressing info (RelFileNode) for a relcache entry
* Initialize the physical addressing info (RelFileLocator) for a relcache entry
*
* Note: at the physical level, relations in the pg_global tablespace must
* be treated as shared, even if relisshared isn't set. Hence we do not
@ -1309,20 +1309,20 @@ retry:
static void
RelationInitPhysicalAddr(Relation relation)
{
Oid oldnode = relation->rd_node.relNode;
RelFileNumber oldnumber = relation->rd_locator.relNumber;
/* these relations kinds never have storage */
if (!RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
return;
if (relation->rd_rel->reltablespace)
relation->rd_node.spcNode = relation->rd_rel->reltablespace;
relation->rd_locator.spcOid = relation->rd_rel->reltablespace;
else
relation->rd_node.spcNode = MyDatabaseTableSpace;
if (relation->rd_node.spcNode == GLOBALTABLESPACE_OID)
relation->rd_node.dbNode = InvalidOid;
relation->rd_locator.spcOid = MyDatabaseTableSpace;
if (relation->rd_locator.spcOid == GLOBALTABLESPACE_OID)
relation->rd_locator.dbOid = InvalidOid;
else
relation->rd_node.dbNode = MyDatabaseId;
relation->rd_locator.dbOid = MyDatabaseId;
if (relation->rd_rel->relfilenode)
{
@ -1356,30 +1356,30 @@ RelationInitPhysicalAddr(Relation relation)
heap_freetuple(phys_tuple);
}
relation->rd_node.relNode = relation->rd_rel->relfilenode;
relation->rd_locator.relNumber = relation->rd_rel->relfilenode;
}
else
{
/* Consult the relation mapper */
relation->rd_node.relNode =
RelationMapOidToFilenode(relation->rd_id,
relation->rd_rel->relisshared);
if (!OidIsValid(relation->rd_node.relNode))
relation->rd_locator.relNumber =
RelationMapOidToFilenumber(relation->rd_id,
relation->rd_rel->relisshared);
if (!RelFileNumberIsValid(relation->rd_locator.relNumber))
elog(ERROR, "could not find relation mapping for relation \"%s\", OID %u",
RelationGetRelationName(relation), relation->rd_id);
}
/*
* For RelationNeedsWAL() to answer correctly on parallel workers, restore
* rd_firstRelfilenodeSubid. No subtransactions start or end while in
* rd_firstRelfilelocatorSubid. No subtransactions start or end while in
* parallel mode, so the specific SubTransactionId does not matter.
*/
if (IsParallelWorker() && oldnode != relation->rd_node.relNode)
if (IsParallelWorker() && oldnumber != relation->rd_locator.relNumber)
{
if (RelFileNodeSkippingWAL(relation->rd_node))
relation->rd_firstRelfilenodeSubid = TopSubTransactionId;
if (RelFileLocatorSkippingWAL(relation->rd_locator))
relation->rd_firstRelfilelocatorSubid = TopSubTransactionId;
else
relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
}
}
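
Condensed, the addressing rule this function implements is (pseudo-C mirroring the branches above):

/* spcOid: the relation's own tablespace, else the database default */
loc.spcOid = reltablespace ? reltablespace : MyDatabaseTableSpace;
/* dbOid: shared relations (pg_global) get InvalidOid, others the current DB */
loc.dbOid = (loc.spcOid == GLOBALTABLESPACE_OID) ? InvalidOid : MyDatabaseId;
/* relNumber: pg_class.relfilenode when nonzero, else ask the relmapper */
loc.relNumber = relfilenode ? relfilenode
                            : RelationMapOidToFilenumber(relid, relisshared);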
@ -1889,8 +1889,8 @@ formrdesc(const char *relationName, Oid relationReltype,
*/
relation->rd_isnailed = true;
relation->rd_createSubid = InvalidSubTransactionId;
relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
relation->rd_backend = InvalidBackendId;
relation->rd_islocaltemp = false;
@ -1978,11 +1978,11 @@ formrdesc(const char *relationName, Oid relationReltype,
/*
* All relations made with formrdesc are mapped. This is necessarily so
* because there is no other way to know what filenode they currently
* because there is no other way to know what filenumber they currently
* have. In bootstrap mode, add them to the initial relation mapper data,
* specifying that the initial filenode is the same as the OID.
* specifying that the initial filenumber is the same as the OID.
*/
relation->rd_rel->relfilenode = InvalidOid;
relation->rd_rel->relfilenode = InvalidRelFileNumber;
if (IsBootstrapProcessingMode())
RelationMapUpdateMap(RelationGetRelid(relation),
RelationGetRelid(relation),
@ -2180,7 +2180,7 @@ RelationClose(Relation relation)
#ifdef RELCACHE_FORCE_RELEASE
if (RelationHasReferenceCountZero(relation) &&
relation->rd_createSubid == InvalidSubTransactionId &&
relation->rd_firstRelfilenodeSubid == InvalidSubTransactionId)
relation->rd_firstRelfilelocatorSubid == InvalidSubTransactionId)
RelationClearRelation(relation, false);
#endif
}
@ -2352,7 +2352,7 @@ RelationReloadNailed(Relation relation)
{
/*
* If it's a nailed-but-not-mapped index, then we need to re-read the
* pg_class row to see if its relfilenode changed.
* pg_class row to see if its relfilenumber changed.
*/
RelationReloadIndexInfo(relation);
}
@ -2700,8 +2700,8 @@ RelationClearRelation(Relation relation, bool rebuild)
Assert(newrel->rd_isnailed == relation->rd_isnailed);
/* creation sub-XIDs must be preserved */
SWAPFIELD(SubTransactionId, rd_createSubid);
SWAPFIELD(SubTransactionId, rd_newRelfilenodeSubid);
SWAPFIELD(SubTransactionId, rd_firstRelfilenodeSubid);
SWAPFIELD(SubTransactionId, rd_newRelfilelocatorSubid);
SWAPFIELD(SubTransactionId, rd_firstRelfilelocatorSubid);
SWAPFIELD(SubTransactionId, rd_droppedSubid);
/* un-swap rd_rel pointers, swap contents instead */
SWAPFIELD(Form_pg_class, rd_rel);
@ -2791,12 +2791,12 @@ static void
RelationFlushRelation(Relation relation)
{
if (relation->rd_createSubid != InvalidSubTransactionId ||
relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId)
relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId)
{
/*
* New relcache entries are always rebuilt, not flushed; else we'd
* forget the "new" status of the relation. Ditto for the
* new-relfilenode status.
* new-relfilenumber status.
*
* The rel could have zero refcnt here, so temporarily increment the
* refcnt to ensure it's safe to rebuild it. We can assume that the
@ -2835,7 +2835,7 @@ RelationForgetRelation(Oid rid)
Assert(relation->rd_droppedSubid == InvalidSubTransactionId);
if (relation->rd_createSubid != InvalidSubTransactionId ||
relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId)
relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId)
{
/*
* In the event of subtransaction rollback, we must not forget
@ -2894,7 +2894,7 @@ RelationCacheInvalidateEntry(Oid relationId)
*
* Apart from debug_discard_caches, this is currently used only to recover
* from SI message buffer overflow, so we do not touch relations having
* new-in-transaction relfilenodes; they cannot be targets of cross-backend
* new-in-transaction relfilenumbers; they cannot be targets of cross-backend
* SI updates (and our own updates now go through a separate linked list
* that isn't limited by the SI message buffer size).
*
@ -2909,7 +2909,7 @@ RelationCacheInvalidateEntry(Oid relationId)
* so hash_seq_search will complete safely; (b) during the second pass we
* only hold onto pointers to nondeletable entries.
*
* The two-phase approach also makes it easy to update relfilenodes for
* The two-phase approach also makes it easy to update relfilenumbers for
* mapped relations before we do anything else, and to ensure that the
* second pass processes nailed-in-cache items before other nondeletable
* items. This should ensure that system catalogs are up to date before
@ -2948,12 +2948,12 @@ RelationCacheInvalidate(bool debug_discard)
/*
* Ignore new relations; no other backend will manipulate them before
* we commit. Likewise, before replacing a relation's relfilenode, we
* shall have acquired AccessExclusiveLock and drained any applicable
* pending invalidations.
* we commit. Likewise, before replacing a relation's relfilelocator,
* we shall have acquired AccessExclusiveLock and drained any
* applicable pending invalidations.
*/
if (relation->rd_createSubid != InvalidSubTransactionId ||
relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId)
relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId)
continue;
relcacheInvalsReceived++;
@ -2967,8 +2967,8 @@ RelationCacheInvalidate(bool debug_discard)
else
{
/*
* If it's a mapped relation, immediately update its rd_node in
* case its relfilenode changed. We must do this during phase 1
* If it's a mapped relation, immediately update its rd_locator in
* case its relfilenumber changed. We must do this during phase 1
* in case the relation is consulted during rebuild of other
* relcache entries in phase 2. It's safe since consulting the
* map doesn't involve any access to relcache entries.
@ -3078,14 +3078,14 @@ AssertPendingSyncConsistency(Relation relation)
RelationIsPermanent(relation) &&
((relation->rd_createSubid != InvalidSubTransactionId &&
RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId);
relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
Assert(relcache_verdict == RelFileNodeSkippingWAL(relation->rd_node));
Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator));
if (relation->rd_droppedSubid != InvalidSubTransactionId)
Assert(!relation->rd_isvalid &&
(relation->rd_createSubid != InvalidSubTransactionId ||
relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId));
relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId));
}
/*
@ -3282,8 +3282,8 @@ AtEOXact_cleanup(Relation relation, bool isCommit)
* also lets RelationClearRelation() drop the relcache entry.
*/
relation->rd_createSubid = InvalidSubTransactionId;
relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
if (clear_relcache)
@ -3397,8 +3397,8 @@ AtEOSubXact_cleanup(Relation relation, bool isCommit,
{
/* allow the entry to be removed */
relation->rd_createSubid = InvalidSubTransactionId;
relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
RelationClearRelation(relation, false);
return;
@ -3419,23 +3419,23 @@ AtEOSubXact_cleanup(Relation relation, bool isCommit,
}
/*
* Likewise, update or drop any new-relfilenode-in-subtransaction record
* Likewise, update or drop any new-relfilenumber-in-subtransaction record
* or drop record.
*/
if (relation->rd_newRelfilenodeSubid == mySubid)
if (relation->rd_newRelfilelocatorSubid == mySubid)
{
if (isCommit)
relation->rd_newRelfilenodeSubid = parentSubid;
relation->rd_newRelfilelocatorSubid = parentSubid;
else
relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
}
if (relation->rd_firstRelfilenodeSubid == mySubid)
if (relation->rd_firstRelfilelocatorSubid == mySubid)
{
if (isCommit)
relation->rd_firstRelfilenodeSubid = parentSubid;
relation->rd_firstRelfilelocatorSubid = parentSubid;
else
relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
}
if (relation->rd_droppedSubid == mySubid)
@ -3459,7 +3459,7 @@ RelationBuildLocalRelation(const char *relname,
TupleDesc tupDesc,
Oid relid,
Oid accessmtd,
Oid relfilenode,
RelFileNumber relfilenumber,
Oid reltablespace,
bool shared_relation,
bool mapped_relation,
@ -3533,8 +3533,8 @@ RelationBuildLocalRelation(const char *relname,
/* it's being created in this transaction */
rel->rd_createSubid = GetCurrentSubTransactionId();
rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
rel->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
rel->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
rel->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
rel->rd_droppedSubid = InvalidSubTransactionId;
/*
@ -3616,8 +3616,8 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
* places. For a mapped relation, we set relfilenode to zero and rely on
* RelationInitPhysicalAddr to consult the map.
* places. For a mapped relation, we set relfilenumber to zero and rely
* on RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
@ -3630,12 +3630,12 @@ RelationBuildLocalRelation(const char *relname,
if (mapped_relation)
{
rel->rd_rel->relfilenode = InvalidOid;
rel->rd_rel->relfilenode = InvalidRelFileNumber;
/* Add it to the active mapping information */
RelationMapUpdateMap(relid, relfilenode, shared_relation, true);
RelationMapUpdateMap(relid, relfilenumber, shared_relation, true);
}
else
rel->rd_rel->relfilenode = relfilenode;
rel->rd_rel->relfilenode = relfilenumber;
RelationInitLockInfo(rel); /* see lmgr.c */
@ -3683,13 +3683,13 @@ RelationBuildLocalRelation(const char *relname,
/*
* RelationSetNewRelfilenode
* RelationSetNewRelfilenumber
*
* Assign a new relfilenode (physical file name), and possibly a new
* Assign a new relfilenumber (physical file name), and possibly a new
* persistence setting, to the relation.
*
* This allows a full rewrite of the relation to be done with transactional
* safety (since the filenode assignment can be rolled back). Note however
* safety (since the filenumber assignment can be rolled back). Note however
* that there is no simple way to access the relation's old data for the
* remainder of the current transaction. This limits the usefulness to cases
* such as TRUNCATE or rebuilding an index from scratch.
@ -3697,19 +3697,19 @@ RelationBuildLocalRelation(const char *relname,
* Caller must already hold exclusive lock on the relation.
*/
void
RelationSetNewRelfilenode(Relation relation, char persistence)
RelationSetNewRelfilenumber(Relation relation, char persistence)
{
Oid newrelfilenode;
RelFileNumber newrelfilenumber;
Relation pg_class;
HeapTuple tuple;
Form_pg_class classform;
MultiXactId minmulti = InvalidMultiXactId;
TransactionId freezeXid = InvalidTransactionId;
RelFileNode newrnode;
RelFileLocator newrlocator;
/* Allocate a new relfilenode */
newrelfilenode = GetNewRelFileNode(relation->rd_rel->reltablespace, NULL,
persistence);
/* Allocate a new relfilenumber */
newrelfilenumber = GetNewRelFileNumber(relation->rd_rel->reltablespace,
NULL, persistence);
/*
* Get a writable copy of the pg_class tuple for the given relation.
@ -3729,28 +3729,28 @@ RelationSetNewRelfilenode(Relation relation, char persistence)
RelationDropStorage(relation);
/*
* Create storage for the main fork of the new relfilenode. If it's a
* Create storage for the main fork of the new relfilenumber. If it's a
* table-like object, call into the table AM to do so, which'll also
* create the table's init fork if needed.
*
* NOTE: If relevant for the AM, any conflict in relfilenode value will be
* caught here, if GetNewRelFileNode messes up for any reason.
* NOTE: If relevant for the AM, any conflict in relfilenumber value will
* be caught here, if GetNewRelFileNumber messes up for any reason.
*/
newrnode = relation->rd_node;
newrnode.relNode = newrelfilenode;
newrlocator = relation->rd_locator;
newrlocator.relNumber = newrelfilenumber;
if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
{
table_relation_set_new_filenode(relation, &newrnode,
persistence,
&freezeXid, &minmulti);
table_relation_set_new_filelocator(relation, &newrlocator,
persistence,
&freezeXid, &minmulti);
}
else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
{
/* handle these directly, at least for now */
SMgrRelation srel;
srel = RelationCreateStorage(newrnode, persistence, true);
srel = RelationCreateStorage(newrlocator, persistence, true);
smgrclose(srel);
}
else
@ -3789,7 +3789,7 @@ RelationSetNewRelfilenode(Relation relation, char persistence)
/* Do the deed */
RelationMapUpdateMap(RelationGetRelid(relation),
newrelfilenode,
newrelfilenumber,
relation->rd_rel->relisshared,
false);
@ -3799,7 +3799,7 @@ RelationSetNewRelfilenode(Relation relation, char persistence)
else
{
/* Normal case, update the pg_class entry */
classform->relfilenode = newrelfilenode;
classform->relfilenode = newrelfilenumber;
/* relpages etc. never change for sequences */
if (relation->rd_rel->relkind != RELKIND_SEQUENCE)
@ -3825,27 +3825,27 @@ RelationSetNewRelfilenode(Relation relation, char persistence)
*/
CommandCounterIncrement();
RelationAssumeNewRelfilenode(relation);
RelationAssumeNewRelfilelocator(relation);
}
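
A hypothetical call site, for flavor: a TRUNCATE-style rewrite that keeps the relation's existing persistence would simply do

    RelationSetNewRelfilenumber(rel, rel->rd_rel->relpersistence);

after which the old storage is dropped at commit, while on abort the freshly assigned relfilenumber's storage vanishes instead.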
/*
* RelationAssumeNewRelfilenode
* RelationAssumeNewRelfilelocator
*
* Code that modifies pg_class.reltablespace or pg_class.relfilenode must call
* this. The call shall precede any code that might insert WAL records whose
* replay would modify bytes in the new RelFileNode, and the call shall follow
* any WAL modifying bytes in the prior RelFileNode. See struct RelationData.
* replay would modify bytes in the new RelFileLocator, and the call shall follow
* any WAL modifying bytes in the prior RelFileLocator. See struct RelationData.
* Ideally, call this as near as possible to the CommandCounterIncrement()
* that makes the pg_class change visible (before it or after it); that
* minimizes the chance of future development adding a forbidden WAL insertion
* between RelationAssumeNewRelfilenode() and CommandCounterIncrement().
* between RelationAssumeNewRelfilelocator() and CommandCounterIncrement().
*/
void
RelationAssumeNewRelfilenode(Relation relation)
RelationAssumeNewRelfilelocator(Relation relation)
{
relation->rd_newRelfilenodeSubid = GetCurrentSubTransactionId();
if (relation->rd_firstRelfilenodeSubid == InvalidSubTransactionId)
relation->rd_firstRelfilenodeSubid = relation->rd_newRelfilenodeSubid;
relation->rd_newRelfilelocatorSubid = GetCurrentSubTransactionId();
if (relation->rd_firstRelfilelocatorSubid == InvalidSubTransactionId)
relation->rd_firstRelfilelocatorSubid = relation->rd_newRelfilelocatorSubid;
/* Flag relation as needing eoxact cleanup (to clear these fields) */
EOXactListAdd(relation);
@ -6254,8 +6254,8 @@ load_relcache_init_file(bool shared)
rel->rd_fkeyvalid = false;
rel->rd_fkeylist = NIL;
rel->rd_createSubid = InvalidSubTransactionId;
rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
rel->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
rel->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
rel->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
rel->rd_droppedSubid = InvalidSubTransactionId;
rel->rd_amcache = NULL;
rel->pgstat_info = NULL;

View File

@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* relfilenodemap.c
* relfilenode to oid mapping cache.
* relfilenumbermap.c
* relfilenumber to oid mapping cache.
*
* Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/utils/cache/relfilenodemap.c
* src/backend/utils/cache/relfilenumbermap.c
*
*-------------------------------------------------------------------------
*/
@ -25,42 +25,42 @@
#include "utils/hsearch.h"
#include "utils/inval.h"
#include "utils/rel.h"
#include "utils/relfilenodemap.h"
#include "utils/relfilenumbermap.h"
#include "utils/relmapper.h"
/* Hash table for information about each relfilenode <-> oid pair */
static HTAB *RelfilenodeMapHash = NULL;
/* Hash table for information about each relfilenumber <-> oid pair */
static HTAB *RelfilenumberMapHash = NULL;
/* built first time through in InitializeRelfilenodeMap */
static ScanKeyData relfilenode_skey[2];
/* built first time through in InitializeRelfilenumberMap */
static ScanKeyData relfilenumber_skey[2];
typedef struct
{
Oid reltablespace;
Oid relfilenode;
} RelfilenodeMapKey;
RelFileNumber relfilenumber;
} RelfilenumberMapKey;
typedef struct
{
RelfilenodeMapKey key; /* lookup key - must be first */
RelfilenumberMapKey key; /* lookup key - must be first */
Oid relid; /* pg_class.oid */
} RelfilenodeMapEntry;
} RelfilenumberMapEntry;
/*
* RelfilenodeMapInvalidateCallback
* RelfilenumberMapInvalidateCallback
* Flush mapping entries when pg_class is updated in a relevant fashion.
*/
static void
RelfilenodeMapInvalidateCallback(Datum arg, Oid relid)
RelfilenumberMapInvalidateCallback(Datum arg, Oid relid)
{
HASH_SEQ_STATUS status;
RelfilenodeMapEntry *entry;
RelfilenumberMapEntry *entry;
/* callback only gets registered after creating the hash */
Assert(RelfilenodeMapHash != NULL);
Assert(RelfilenumberMapHash != NULL);
hash_seq_init(&status, RelfilenodeMapHash);
while ((entry = (RelfilenodeMapEntry *) hash_seq_search(&status)) != NULL)
hash_seq_init(&status, RelfilenumberMapHash);
while ((entry = (RelfilenumberMapEntry *) hash_seq_search(&status)) != NULL)
{
/*
* If relid is InvalidOid, signaling a complete reset, we must remove
@ -71,7 +71,7 @@ RelfilenodeMapInvalidateCallback(Datum arg, Oid relid)
entry->relid == InvalidOid || /* negative cache entry */
entry->relid == relid) /* individual flushed relation */
{
if (hash_search(RelfilenodeMapHash,
if (hash_search(RelfilenumberMapHash,
(void *) &entry->key,
HASH_REMOVE,
NULL) == NULL)
@ -81,11 +81,11 @@ RelfilenodeMapInvalidateCallback(Datum arg, Oid relid)
}
/*
* InitializeRelfilenodeMap
* InitializeRelfilenumberMap
* Initialize cache, either on first use or after a reset.
*/
static void
InitializeRelfilenodeMap(void)
InitializeRelfilenumberMap(void)
{
HASHCTL ctl;
int i;
@ -95,50 +95,50 @@ InitializeRelfilenodeMap(void)
CreateCacheMemoryContext();
/* build skey */
MemSet(&relfilenode_skey, 0, sizeof(relfilenode_skey));
MemSet(&relfilenumber_skey, 0, sizeof(relfilenumber_skey));
for (i = 0; i < 2; i++)
{
fmgr_info_cxt(F_OIDEQ,
&relfilenode_skey[i].sk_func,
&relfilenumber_skey[i].sk_func,
CacheMemoryContext);
relfilenode_skey[i].sk_strategy = BTEqualStrategyNumber;
relfilenode_skey[i].sk_subtype = InvalidOid;
relfilenode_skey[i].sk_collation = InvalidOid;
relfilenumber_skey[i].sk_strategy = BTEqualStrategyNumber;
relfilenumber_skey[i].sk_subtype = InvalidOid;
relfilenumber_skey[i].sk_collation = InvalidOid;
}
relfilenode_skey[0].sk_attno = Anum_pg_class_reltablespace;
relfilenode_skey[1].sk_attno = Anum_pg_class_relfilenode;
relfilenumber_skey[0].sk_attno = Anum_pg_class_reltablespace;
relfilenumber_skey[1].sk_attno = Anum_pg_class_relfilenode;
/*
* Only create the RelfilenodeMapHash now, so we don't end up partially
* Only create the RelfilenumberMapHash now, so we don't end up partially
* initialized when fmgr_info_cxt() above ERRORs out with an out of memory
* error.
*/
ctl.keysize = sizeof(RelfilenodeMapKey);
ctl.entrysize = sizeof(RelfilenodeMapEntry);
ctl.keysize = sizeof(RelfilenumberMapKey);
ctl.entrysize = sizeof(RelfilenumberMapEntry);
ctl.hcxt = CacheMemoryContext;
RelfilenodeMapHash =
hash_create("RelfilenodeMap cache", 64, &ctl,
RelfilenumberMapHash =
hash_create("RelfilenumberMap cache", 64, &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* Watch for invalidation events. */
CacheRegisterRelcacheCallback(RelfilenodeMapInvalidateCallback,
CacheRegisterRelcacheCallback(RelfilenumberMapInvalidateCallback,
(Datum) 0);
}
/*
* Map a relation's (tablespace, filenode) to a relation's oid and cache the
* result.
* Map a relation's (tablespace, relfilenumber) to a relation's oid and cache
* the result.
*
* Returns InvalidOid if no relation matching the criteria could be found.
*/
Oid
RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
RelidByRelfilenumber(Oid reltablespace, RelFileNumber relfilenumber)
{
RelfilenodeMapKey key;
RelfilenodeMapEntry *entry;
RelfilenumberMapKey key;
RelfilenumberMapEntry *entry;
bool found;
SysScanDesc scandesc;
Relation relation;
@ -146,8 +146,8 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
ScanKeyData skey[2];
Oid relid;
if (RelfilenodeMapHash == NULL)
InitializeRelfilenodeMap();
if (RelfilenumberMapHash == NULL)
InitializeRelfilenumberMap();
/* pg_class will show 0 when the value is actually MyDatabaseTableSpace */
if (reltablespace == MyDatabaseTableSpace)
@ -155,7 +155,7 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
MemSet(&key, 0, sizeof(key));
key.reltablespace = reltablespace;
key.relfilenode = relfilenode;
key.relfilenumber = relfilenumber;
/*
* Check cache and return entry if one is found. Even if no target
@ -164,7 +164,7 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
* since querying invalid values isn't supposed to be a frequent thing,
* but it's basically free.
*/
entry = hash_search(RelfilenodeMapHash, (void *) &key, HASH_FIND, &found);
entry = hash_search(RelfilenumberMapHash, (void *) &key, HASH_FIND, &found);
if (found)
return entry->relid;
@ -179,7 +179,7 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
/*
* Ok, shared table, check relmapper.
*/
relid = RelationMapFilenodeToOid(relfilenode, true);
relid = RelationMapFilenumberToOid(relfilenumber, true);
}
else
{
@ -192,11 +192,11 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
relation = table_open(RelationRelationId, AccessShareLock);
/* copy scankey to local copy, it will be modified during the scan */
memcpy(skey, relfilenode_skey, sizeof(skey));
memcpy(skey, relfilenumber_skey, sizeof(skey));
/* set scan arguments */
skey[0].sk_argument = ObjectIdGetDatum(reltablespace);
skey[1].sk_argument = ObjectIdGetDatum(relfilenode);
skey[1].sk_argument = ObjectIdGetDatum(relfilenumber);
scandesc = systable_beginscan(relation,
ClassTblspcRelfilenodeIndexId,
@ -213,12 +213,12 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
if (found)
elog(ERROR,
"unexpected duplicate for tablespace %u, relfilenode %u",
reltablespace, relfilenode);
"unexpected duplicate for tablespace %u, relfilenumber %u",
reltablespace, relfilenumber);
found = true;
Assert(classform->reltablespace == reltablespace);
Assert(classform->relfilenode == relfilenode);
Assert(classform->relfilenode == relfilenumber);
relid = classform->oid;
}
@ -227,7 +227,7 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
/* check for tables that are mapped but not shared */
if (!found)
relid = RelationMapFilenodeToOid(relfilenode, false);
relid = RelationMapFilenumberToOid(relfilenumber, false);
}
/*
@ -235,7 +235,7 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
* caused cache invalidations to be executed which would have deleted a
* new entry if we had entered it above.
*/
entry = hash_search(RelfilenodeMapHash, (void *) &key, HASH_ENTER, &found);
entry = hash_search(RelfilenumberMapHash, (void *) &key, HASH_ENTER, &found);
if (found)
elog(ERROR, "corrupted hashtable");
entry->relid = relid;
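
Typical use, sketched (both arguments as they would come from a file path; InvalidOid signals that nothing matched):

    Oid     relid = RelidByRelfilenumber(reltablespace, relfilenumber);

    if (OidIsValid(relid))
        elog(DEBUG1, "file belongs to relation with OID %u", relid);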

View File

@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* relmapper.c
* Catalog-to-filenode mapping
* Catalog-to-filenumber mapping
*
* For most tables, the physical file underlying the table is specified by
* pg_class.relfilenode. However, that obviously won't work for pg_class
@ -11,7 +11,7 @@
* update other databases' pg_class entries when relocating a shared catalog.
* Therefore, for these special catalogs (henceforth referred to as "mapped
* catalogs") we rely on a separately maintained file that shows the mapping
* from catalog OIDs to filenode numbers. Each database has a map file for
* from catalog OIDs to filenumbers. Each database has a map file for
* its local mapped catalogs, and there is a separate map file for shared
* catalogs. Mapped catalogs have zero in their pg_class.relfilenode entries.
*
@ -79,7 +79,7 @@
typedef struct RelMapping
{
Oid mapoid; /* OID of a catalog */
Oid mapfilenode; /* its filenode number */
RelFileNumber mapfilenumber; /* its rel file number */
} RelMapping;
typedef struct RelMapFile
@ -116,7 +116,7 @@ static RelMapFile local_map;
* subtransactions, so one set of transaction-level changes is sufficient.
*
* The active_xxx variables contain updates that are valid in our transaction
* and should be honored by RelationMapOidToFilenode. The pending_xxx
* and should be honored by RelationMapOidToFilenumber. The pending_xxx
* variables contain updates we have been told about that aren't active yet;
* they will become active at the next CommandCounterIncrement. This setup
* lets map updates act similarly to updates of pg_class rows, ie, they
@ -132,8 +132,8 @@ static RelMapFile pending_local_updates;
/* non-export function prototypes */
static void apply_map_update(RelMapFile *map, Oid relationId, Oid fileNode,
bool add_okay);
static void apply_map_update(RelMapFile *map, Oid relationId,
RelFileNumber filenumber, bool add_okay);
static void merge_map_updates(RelMapFile *map, const RelMapFile *updates,
bool add_okay);
static void load_relmap_file(bool shared, bool lock_held);
@ -146,19 +146,20 @@ static void perform_relmap_update(bool shared, const RelMapFile *updates);
/*
* RelationMapOidToFilenode
* RelationMapOidToFilenumber
*
* The raison d' etre ... given a relation OID, look up its filenode.
* The raison d' etre ... given a relation OID, look up its filenumber.
*
* Although shared and local relation OIDs should never overlap, the caller
* always knows which we need --- so pass that information to avoid useless
* searching.
*
* Returns InvalidOid if the OID is not known (which should never happen,
* but the caller is in a better position to report a meaningful error).
* Returns InvalidRelFileNumber if the OID is not known (which should never
* happen, but the caller is in a better position to report a meaningful
* error).
*/
Oid
RelationMapOidToFilenode(Oid relationId, bool shared)
RelFileNumber
RelationMapOidToFilenumber(Oid relationId, bool shared)
{
const RelMapFile *map;
int32 i;
@ -170,13 +171,13 @@ RelationMapOidToFilenode(Oid relationId, bool shared)
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
return map->mappings[i].mapfilenode;
return map->mappings[i].mapfilenumber;
}
map = &shared_map;
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
return map->mappings[i].mapfilenode;
return map->mappings[i].mapfilenumber;
}
}
else
@ -185,33 +186,33 @@ RelationMapOidToFilenode(Oid relationId, bool shared)
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
return map->mappings[i].mapfilenode;
return map->mappings[i].mapfilenumber;
}
map = &local_map;
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
return map->mappings[i].mapfilenode;
return map->mappings[i].mapfilenumber;
}
}
return InvalidOid;
return InvalidRelFileNumber;
}
/*
* RelationMapFilenodeToOid
* RelationMapFilenumberToOid
*
* Do the reverse of the normal direction of mapping done in
* RelationMapOidToFilenode.
* RelationMapOidToFilenumber.
*
* This is not supposed to be used during normal running but rather for
* information purposes when looking at the filesystem or xlog.
*
* Returns InvalidOid if the OID is not known; this can easily happen if the
* relfilenode doesn't pertain to a mapped relation.
* relfilenumber doesn't pertain to a mapped relation.
*/
Oid
RelationMapFilenodeToOid(Oid filenode, bool shared)
RelationMapFilenumberToOid(RelFileNumber filenumber, bool shared)
{
const RelMapFile *map;
int32 i;
@ -222,13 +223,13 @@ RelationMapFilenodeToOid(Oid filenode, bool shared)
map = &active_shared_updates;
for (i = 0; i < map->num_mappings; i++)
{
if (filenode == map->mappings[i].mapfilenode)
if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
map = &shared_map;
for (i = 0; i < map->num_mappings; i++)
{
if (filenode == map->mappings[i].mapfilenode)
if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
}
@ -237,13 +238,13 @@ RelationMapFilenodeToOid(Oid filenode, bool shared)
map = &active_local_updates;
for (i = 0; i < map->num_mappings; i++)
{
if (filenode == map->mappings[i].mapfilenode)
if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
map = &local_map;
for (i = 0; i < map->num_mappings; i++)
{
if (filenode == map->mappings[i].mapfilenode)
if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
}
@ -252,13 +253,13 @@ RelationMapFilenodeToOid(Oid filenode, bool shared)
}
/*
* RelationMapOidToFilenodeForDatabase
* RelationMapOidToFilenumberForDatabase
*
* Like RelationMapOidToFilenode, but reads the mapping from the indicated
* Like RelationMapOidToFilenumber, but reads the mapping from the indicated
* path instead of using the one for the current database.
*/
Oid
RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId)
RelFileNumber
RelationMapOidToFilenumberForDatabase(char *dbpath, Oid relationId)
{
RelMapFile map;
int i;
@ -270,10 +271,10 @@ RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId)
for (i = 0; i < map.num_mappings; i++)
{
if (relationId == map.mappings[i].mapoid)
return map.mappings[i].mapfilenode;
return map.mappings[i].mapfilenumber;
}
return InvalidOid;
return InvalidRelFileNumber;
}
/*
@ -311,13 +312,13 @@ RelationMapCopy(Oid dbid, Oid tsid, char *srcdbpath, char *dstdbpath)
/*
* RelationMapUpdateMap
*
* Install a new relfilenode mapping for the specified relation.
* Install a new relfilenumber mapping for the specified relation.
*
* If immediate is true (or we're bootstrapping), the mapping is activated
* immediately. Otherwise it is made pending until CommandCounterIncrement.
*/
void
RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
RelationMapUpdateMap(Oid relationId, RelFileNumber fileNumber, bool shared,
bool immediate)
{
RelMapFile *map;
@ -362,7 +363,7 @@ RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
map = &pending_local_updates;
}
}
apply_map_update(map, relationId, fileNode, true);
apply_map_update(map, relationId, fileNumber, true);
}
/*
@ -375,7 +376,8 @@ RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
* add_okay = false to draw an error if not.
*/
static void
apply_map_update(RelMapFile *map, Oid relationId, Oid fileNode, bool add_okay)
apply_map_update(RelMapFile *map, Oid relationId, RelFileNumber fileNumber,
bool add_okay)
{
int32 i;
@ -384,7 +386,7 @@ apply_map_update(RelMapFile *map, Oid relationId, Oid fileNode, bool add_okay)
{
if (relationId == map->mappings[i].mapoid)
{
map->mappings[i].mapfilenode = fileNode;
map->mappings[i].mapfilenumber = fileNumber;
return;
}
}
@ -396,7 +398,7 @@ apply_map_update(RelMapFile *map, Oid relationId, Oid fileNode, bool add_okay)
if (map->num_mappings >= MAX_MAPPINGS)
elog(ERROR, "ran out of space in relation map");
map->mappings[map->num_mappings].mapoid = relationId;
map->mappings[map->num_mappings].mapfilenode = fileNode;
map->mappings[map->num_mappings].mapfilenumber = fileNumber;
map->num_mappings++;
}
@ -415,7 +417,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
{
apply_map_update(map,
updates->mappings[i].mapoid,
updates->mappings[i].mapfilenode,
updates->mappings[i].mapfilenumber,
add_okay);
}
}
@ -983,12 +985,12 @@ write_relmap_file(RelMapFile *newmap, bool write_wal, bool send_sinval,
for (i = 0; i < newmap->num_mappings; i++)
{
RelFileNode rnode;
RelFileLocator rlocator;
rnode.spcNode = tsid;
rnode.dbNode = dbid;
rnode.relNode = newmap->mappings[i].mapfilenode;
RelationPreserveStorage(rnode, false);
rlocator.spcOid = tsid;
rlocator.dbOid = dbid;
rlocator.relNumber = newmap->mappings[i].mapfilenumber;
RelationPreserveStorage(rlocator, false);
}
}
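
For illustration, pg_class is itself a mapped catalog: its pg_class.relfilenode is zero, so resolving its storage goes through the map, roughly

    RelFileNumber   fn = RelationMapOidToFilenumber(RelationRelationId,
                                                    false); /* not shared */
    Assert(RelFileNumberIsValid(fn));

which is exactly the fallback path RelationInitPhysicalAddr takes in relcache.c above.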

View File

@ -4804,15 +4804,15 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
{
PQExpBuffer upgrade_query = createPQExpBuffer();
PGresult *upgrade_res;
Oid relfilenode;
RelFileNumber relfilenumber;
Oid toast_oid;
Oid toast_relfilenode;
RelFileNumber toast_relfilenumber;
char relkind;
Oid toast_index_oid;
Oid toast_index_relfilenode;
RelFileNumber toast_index_relfilenumber;
/*
* Preserve the OID and relfilenode of the table, table's index, table's
* Preserve the OID and relfilenumber of the table, table's index, table's
* toast table and toast table's index if any.
*
* One complexity is that the current table definition might not require
@ -4835,16 +4835,16 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
relkind = *PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "relkind"));
relfilenode = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "relfilenode")));
relfilenumber = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "relfilenode")));
toast_oid = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "reltoastrelid")));
toast_relfilenode = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "toast_relfilenode")));
toast_relfilenumber = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "toast_relfilenode")));
toast_index_oid = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "indexrelid")));
toast_index_relfilenode = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "toast_index_relfilenode")));
toast_index_relfilenumber = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "toast_index_relfilenode")));
appendPQExpBufferStr(upgrade_buffer,
"\n-- For binary upgrade, must preserve pg_class oids and relfilenodes\n");
@ -4857,13 +4857,13 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
/*
* Not every relation has storage. Also, in a pre-v12 database,
* partitioned tables have a relfilenode, which should not be
* partitioned tables have a relfilenumber, which should not be
* preserved when upgrading.
*/
if (OidIsValid(relfilenode) && relkind != RELKIND_PARTITIONED_TABLE)
if (RelFileNumberIsValid(relfilenumber) && relkind != RELKIND_PARTITIONED_TABLE)
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
relfilenode);
relfilenumber);
/*
* In a pre-v12 database, partitioned tables might be marked as having
@ -4877,7 +4877,7 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
toast_oid);
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
toast_relfilenode);
toast_relfilenumber);
/* every toast table has an index */
appendPQExpBuffer(upgrade_buffer,
@ -4885,20 +4885,20 @@ binary_upgrade_set_pg_class_oids(Archive *fout,
toast_index_oid);
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
toast_index_relfilenode);
toast_index_relfilenumber);
}
PQclear(upgrade_res);
}
else
{
/* Preserve the OID and relfilenode of the index */
/* Preserve the OID and relfilenumber of the index */
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
pg_class_oid);
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
relfilenode);
relfilenumber);
}
appendPQExpBufferChar(upgrade_buffer, '\n');
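
For a plain table with a TOAST relation, the buffer assembled here ends up holding a run of statements like the following (all OIDs are placeholders):

-- For binary upgrade, must preserve pg_class oids and relfilenodes
SELECT pg_catalog.binary_upgrade_set_next_heap_pg_class_oid('16402'::pg_catalog.oid);
SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('16402'::pg_catalog.oid);
SELECT pg_catalog.binary_upgrade_set_next_toast_pg_class_oid('16405'::pg_catalog.oid);
SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('16405'::pg_catalog.oid);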

View File

@ -10,7 +10,7 @@
#define DATAPAGEMAP_H
#include "storage/block.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
struct datapagemap
{

View File

@ -56,7 +56,7 @@ static uint32 hash_string_pointer(const char *s);
static filehash_hash *filehash;
static bool isRelDataFile(const char *path);
static char *datasegpath(RelFileNode rnode, ForkNumber forknum,
static char *datasegpath(RelFileLocator rlocator, ForkNumber forknum,
BlockNumber segno);
static file_entry_t *insert_filehash_entry(const char *path);
@ -288,7 +288,7 @@ process_target_file(const char *path, file_type_t type, size_t size,
* hash table!
*/
void
process_target_wal_block_change(ForkNumber forknum, RelFileNode rnode,
process_target_wal_block_change(ForkNumber forknum, RelFileLocator rlocator,
BlockNumber blkno)
{
char *path;
@ -299,7 +299,7 @@ process_target_wal_block_change(ForkNumber forknum, RelFileNode rnode,
segno = blkno / RELSEG_SIZE;
blkno_inseg = blkno % RELSEG_SIZE;
path = datasegpath(rnode, forknum, segno);
path = datasegpath(rlocator, forknum, segno);
entry = lookup_filehash_entry(path);
pfree(path);
@ -508,7 +508,7 @@ print_filemap(filemap_t *filemap)
static bool
isRelDataFile(const char *path)
{
RelFileNode rnode;
RelFileLocator rlocator;
unsigned int segNo;
int nmatch;
bool matched;
@ -532,32 +532,32 @@ isRelDataFile(const char *path)
*
*----
*/
rnode.spcNode = InvalidOid;
rnode.dbNode = InvalidOid;
rnode.relNode = InvalidOid;
rlocator.spcOid = InvalidOid;
rlocator.dbOid = InvalidOid;
rlocator.relNumber = InvalidRelFileNumber;
segNo = 0;
matched = false;
nmatch = sscanf(path, "global/%u.%u", &rnode.relNode, &segNo);
nmatch = sscanf(path, "global/%u.%u", &rlocator.relNumber, &segNo);
if (nmatch == 1 || nmatch == 2)
{
rnode.spcNode = GLOBALTABLESPACE_OID;
rnode.dbNode = 0;
rlocator.spcOid = GLOBALTABLESPACE_OID;
rlocator.dbOid = 0;
matched = true;
}
else
{
nmatch = sscanf(path, "base/%u/%u.%u",
&rnode.dbNode, &rnode.relNode, &segNo);
&rlocator.dbOid, &rlocator.relNumber, &segNo);
if (nmatch == 2 || nmatch == 3)
{
rnode.spcNode = DEFAULTTABLESPACE_OID;
rlocator.spcOid = DEFAULTTABLESPACE_OID;
matched = true;
}
else
{
nmatch = sscanf(path, "pg_tblspc/%u/" TABLESPACE_VERSION_DIRECTORY "/%u/%u.%u",
&rnode.spcNode, &rnode.dbNode, &rnode.relNode,
&rlocator.spcOid, &rlocator.dbOid, &rlocator.relNumber,
&segNo);
if (nmatch == 3 || nmatch == 4)
matched = true;
@ -567,12 +567,12 @@ isRelDataFile(const char *path)
/*
* The sscanf tests above can match files that have extra characters at
* the end. To eliminate such cases, cross-check that GetRelationPath
* creates the exact same filename, when passed the RelFileNode
* creates the exact same filename, when passed the RelFileLocator
* information we extracted from the filename.
*/
if (matched)
{
char *check_path = datasegpath(rnode, MAIN_FORKNUM, segNo);
char *check_path = datasegpath(rlocator, MAIN_FORKNUM, segNo);
if (strcmp(check_path, path) != 0)
matched = false;
@ -589,12 +589,12 @@ isRelDataFile(const char *path)
* The returned path is palloc'd
*/
static char *
datasegpath(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
datasegpath(RelFileLocator rlocator, ForkNumber forknum, BlockNumber segno)
{
char *path;
char *segpath;
path = relpathperm(rnode, forknum);
path = relpathperm(rlocator, forknum);
if (segno > 0)
{
segpath = psprintf("%s.%u", path, segno);
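
Usage sketch (OIDs and segment number are placeholders):

    RelFileLocator  loc = {DEFAULTTABLESPACE_OID, 12345, 67890};
    char       *path = datasegpath(loc, MAIN_FORKNUM, 2);
    /* path is now "base/12345/67890.2", matching what isRelDataFile parses */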

View File

@ -10,7 +10,7 @@
#include "datapagemap.h"
#include "storage/block.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
/* these enum values are sorted in the order we want actions to be processed */
typedef enum
@ -103,7 +103,7 @@ extern void process_source_file(const char *path, file_type_t type,
extern void process_target_file(const char *path, file_type_t type,
size_t size, const char *link_target);
extern void process_target_wal_block_change(ForkNumber forknum,
RelFileNode rnode,
RelFileLocator rlocator,
BlockNumber blkno);
extern filemap_t *decide_file_actions(void);

View File

@ -445,18 +445,18 @@ extractPageInfo(XLogReaderState *record)
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blkno, NULL))
&rlocator, &forknum, &blkno, NULL))
continue;
/* We only care about the main fork; others are copied in toto */
if (forknum != MAIN_FORKNUM)
continue;
process_target_wal_block_change(forknum, rnode, blkno);
process_target_wal_block_change(forknum, rlocator, blkno);
}
}

View File

@ -16,7 +16,7 @@
#include "datapagemap.h"
#include "libpq-fe.h"
#include "storage/block.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
/* Configuration options */
extern char *datadir_target;

View File

@ -19,7 +19,7 @@ OBJS = \
option.o \
parallel.o \
pg_upgrade.o \
relfilenode.o \
relfilenumber.o \
server.o \
tablespace.o \
util.o \

View File

@ -190,9 +190,9 @@ create_rel_filename_map(const char *old_data, const char *new_data,
map->new_tablespace_suffix = new_cluster.tablespace_suffix;
}
/* DB oid and relfilenodes are preserved between old and new cluster */
/* DB oid and relfilenumbers are preserved between old and new cluster */
map->db_oid = old_db->db_oid;
map->relfilenode = old_rel->relfilenode;
map->relfilenumber = old_rel->relfilenumber;
/* used only for logging and error reporting, old/new are identical */
map->nspname = old_rel->nspname;
@ -399,7 +399,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
i_reloid,
i_indtable,
i_toastheap,
i_relfilenode,
i_relfilenumber,
i_reltablespace;
char query[QUERY_ALLOC];
char *last_namespace = NULL,
@ -495,7 +495,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
i_toastheap = PQfnumber(res, "toastheap");
i_nspname = PQfnumber(res, "nspname");
i_relname = PQfnumber(res, "relname");
i_relfilenode = PQfnumber(res, "relfilenode");
i_relfilenumber = PQfnumber(res, "relfilenode");
i_reltablespace = PQfnumber(res, "reltablespace");
i_spclocation = PQfnumber(res, "spclocation");
@ -527,7 +527,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
relname = PQgetvalue(res, relnum, i_relname);
curr->relname = pg_strdup(relname);
curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
curr->relfilenumber = atooid(PQgetvalue(res, relnum, i_relfilenumber));
curr->tblsp_alloc = false;
/* Is the tablespace oid non-default? */

View File

@ -135,7 +135,7 @@ typedef struct
char *nspname; /* namespace name */
char *relname; /* relation name */
Oid reloid; /* relation OID */
Oid relfilenode; /* relation file node */
RelFileNumber relfilenumber; /* relation file number */
Oid indtable; /* if index, OID of its table, else 0 */
Oid toastheap; /* if toast table, OID of base table, else 0 */
char *tablespace; /* tablespace path; "" for cluster default */
@ -159,7 +159,7 @@ typedef struct
const char *old_tablespace_suffix;
const char *new_tablespace_suffix;
Oid db_oid;
Oid relfilenode;
RelFileNumber relfilenumber;
/* the rest are used only for logging and error reporting */
char *nspname; /* namespaces */
char *relname;
@ -400,7 +400,7 @@ void parseCommandLine(int argc, char *argv[]);
void adjust_data_dir(ClusterInfo *cluster);
void get_sock_dir(ClusterInfo *cluster, bool live_check);
/* relfilenode.c */
/* relfilenumber.c */
void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);

View File

@ -1,10 +1,10 @@
/*
* relfilenode.c
* relfilenumber.c
*
* relfilenode functions
* relfilenumber functions
*
* Copyright (c) 2010-2022, PostgreSQL Global Development Group
* src/bin/pg_upgrade/relfilenode.c
* src/bin/pg_upgrade/relfilenumber.c
*/
#include "postgres_fe.h"
@ -181,7 +181,7 @@ transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_must_add_fro
/*
* Now copy/link any related segments as well. Remember, PG breaks large
* files into 1GB segments, the first segment has no extension, subsequent
* segments are named relfilenode.1, relfilenode.2, relfilenode.3.
* segments are named relfilenumber.1, relfilenumber.2, relfilenumber.3.
*/
for (segno = 0;; segno++)
{
@ -194,14 +194,14 @@ transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_must_add_fro
map->old_tablespace,
map->old_tablespace_suffix,
map->db_oid,
map->relfilenode,
map->relfilenumber,
type_suffix,
extent_suffix);
snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s",
map->new_tablespace,
map->new_tablespace_suffix,
map->db_oid,
map->relfilenode,
map->relfilenumber,
type_suffix,
extent_suffix);
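
Concretely, with db_oid 16401 and relfilenumber 16384 (placeholder values), the loop walks old files base/16401/16384, base/16401/16384.1, base/16401/16384.2, ... and, when type_suffix is "_fsm", base/16401/16384_fsm, base/16401/16384_fsm.1, and so on, stopping at the first missing segment.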

View File

@ -37,7 +37,7 @@ static const char *progname;
static int WalSegSz;
static volatile sig_atomic_t time_to_stop = false;
static const RelFileNode emptyRelFileNode = {0, 0, 0};
static const RelFileLocator emptyRelFileLocator = {0, 0, 0};
typedef struct XLogDumpPrivate
{
@ -63,7 +63,7 @@ typedef struct XLogDumpConfig
bool filter_by_rmgr_enabled;
TransactionId filter_by_xid;
bool filter_by_xid_enabled;
RelFileNode filter_by_relation;
RelFileLocator filter_by_relation;
bool filter_by_extended;
bool filter_by_relation_enabled;
BlockNumber filter_by_relation_block;
@ -393,7 +393,7 @@ WALDumpReadPage(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
*/
static bool
XLogRecordMatchesRelationBlock(XLogReaderState *record,
RelFileNode matchRnode,
RelFileLocator matchRlocator,
BlockNumber matchBlock,
ForkNumber matchFork)
{
@ -401,17 +401,17 @@ XLogRecordMatchesRelationBlock(XLogReaderState *record,
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
RelFileNode rnode;
RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
&rnode, &forknum, &blk, NULL))
&rlocator, &forknum, &blk, NULL))
continue;
if ((matchFork == InvalidForkNumber || matchFork == forknum) &&
(RelFileNodeEquals(matchRnode, emptyRelFileNode) ||
RelFileNodeEquals(matchRnode, rnode)) &&
(RelFileLocatorEquals(matchRlocator, emptyRelFileLocator) ||
RelFileLocatorEquals(matchRlocator, rlocator)) &&
(matchBlock == InvalidBlockNumber || matchBlock == blk))
return true;
}
@ -885,11 +885,11 @@ main(int argc, char **argv)
break;
case 'R':
if (sscanf(optarg, "%u/%u/%u",
&config.filter_by_relation.spcNode,
&config.filter_by_relation.dbNode,
&config.filter_by_relation.relNode) != 3 ||
!OidIsValid(config.filter_by_relation.spcNode) ||
!OidIsValid(config.filter_by_relation.relNode))
&config.filter_by_relation.spcOid,
&config.filter_by_relation.dbOid,
&config.filter_by_relation.relNumber) != 3 ||
!OidIsValid(config.filter_by_relation.spcOid) ||
!RelFileNumberIsValid(config.filter_by_relation.relNumber))
{
pg_log_error("invalid relation specification: \"%s\"", optarg);
pg_log_error_detail("Expecting \"tablespace OID/database OID/relation filenode\".");
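
In practice this parses invocations along the lines of the following (OIDs invented for illustration; 1663 is the usual pg_default tablespace OID):

pg_waldump --relation=1663/16384/16385 000000010000000000000001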
@ -1132,7 +1132,7 @@ main(int argc, char **argv)
!XLogRecordMatchesRelationBlock(xlogreader_state,
config.filter_by_relation_enabled ?
config.filter_by_relation :
emptyRelFileNode,
emptyRelFileLocator,
config.filter_by_relation_block_enabled ?
config.filter_by_relation_block :
InvalidBlockNumber,

View File

@ -107,24 +107,24 @@ forkname_chars(const char *str, ForkNumber *fork)
* XXX this must agree with GetRelationPath()!
*/
char *
GetDatabasePath(Oid dbNode, Oid spcNode)
GetDatabasePath(Oid dbOid, Oid spcOid)
{
if (spcNode == GLOBALTABLESPACE_OID)
if (spcOid == GLOBALTABLESPACE_OID)
{
/* Shared system relations live in {datadir}/global */
Assert(dbNode == 0);
Assert(dbOid == 0);
return pstrdup("global");
}
else if (spcNode == DEFAULTTABLESPACE_OID)
else if (spcOid == DEFAULTTABLESPACE_OID)
{
/* The default tablespace is {datadir}/base */
return psprintf("base/%u", dbNode);
return psprintf("base/%u", dbOid);
}
else
{
/* All other tablespaces are accessed via symlinks */
return psprintf("pg_tblspc/%u/%s/%u",
spcNode, TABLESPACE_VERSION_DIRECTORY, dbNode);
spcOid, TABLESPACE_VERSION_DIRECTORY, dbOid);
}
}
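
Illustrative outputs, with invented OIDs (the versioned tablespace directory name is abbreviated):

GetDatabasePath(0, GLOBALTABLESPACE_OID);		/* "global" */
GetDatabasePath(16384, DEFAULTTABLESPACE_OID);	/* "base/16384" */
GetDatabasePath(16384, 16500);
	/* "pg_tblspc/16500/<TABLESPACE_VERSION_DIRECTORY>/16384" */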
@ -138,44 +138,44 @@ GetDatabasePath(Oid dbNode, Oid spcNode)
* the trouble considering BackendId is just int anyway.
*/
char *
GetRelationPath(Oid dbNode, Oid spcNode, Oid relNode,
GetRelationPath(Oid dbOid, Oid spcOid, RelFileNumber relNumber,
int backendId, ForkNumber forkNumber)
{
char *path;
if (spcNode == GLOBALTABLESPACE_OID)
if (spcOid == GLOBALTABLESPACE_OID)
{
/* Shared system relations live in {datadir}/global */
Assert(dbNode == 0);
Assert(dbOid == 0);
Assert(backendId == InvalidBackendId);
if (forkNumber != MAIN_FORKNUM)
path = psprintf("global/%u_%s",
relNode, forkNames[forkNumber]);
relNumber, forkNames[forkNumber]);
else
path = psprintf("global/%u", relNode);
path = psprintf("global/%u", relNumber);
}
else if (spcNode == DEFAULTTABLESPACE_OID)
else if (spcOid == DEFAULTTABLESPACE_OID)
{
/* The default tablespace is {datadir}/base */
if (backendId == InvalidBackendId)
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("base/%u/%u_%s",
dbNode, relNode,
dbOid, relNumber,
forkNames[forkNumber]);
else
path = psprintf("base/%u/%u",
dbNode, relNode);
dbOid, relNumber);
}
else
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("base/%u/t%d_%u_%s",
dbNode, backendId, relNode,
dbOid, backendId, relNumber,
forkNames[forkNumber]);
else
path = psprintf("base/%u/t%d_%u",
dbNode, backendId, relNode);
dbOid, backendId, relNumber);
}
}
else
@ -185,25 +185,25 @@ GetRelationPath(Oid dbNode, Oid spcNode, Oid relNode,
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("pg_tblspc/%u/%s/%u/%u_%s",
spcNode, TABLESPACE_VERSION_DIRECTORY,
dbNode, relNode,
spcOid, TABLESPACE_VERSION_DIRECTORY,
dbOid, relNumber,
forkNames[forkNumber]);
else
path = psprintf("pg_tblspc/%u/%s/%u/%u",
spcNode, TABLESPACE_VERSION_DIRECTORY,
dbNode, relNode);
spcOid, TABLESPACE_VERSION_DIRECTORY,
dbOid, relNumber);
}
else
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("pg_tblspc/%u/%s/%u/t%d_%u_%s",
spcNode, TABLESPACE_VERSION_DIRECTORY,
dbNode, backendId, relNode,
spcOid, TABLESPACE_VERSION_DIRECTORY,
dbOid, backendId, relNumber,
forkNames[forkNumber]);
else
path = psprintf("pg_tblspc/%u/%s/%u/t%d_%u",
spcNode, TABLESPACE_VERSION_DIRECTORY,
dbNode, backendId, relNode);
spcOid, TABLESPACE_VERSION_DIRECTORY,
dbOid, backendId, relNumber);
}
}
return path;
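
Illustrative calls, again with invented OIDs, showing how the backend ID and fork number shape the result:

GetRelationPath(16384, DEFAULTTABLESPACE_OID, 16385,
				InvalidBackendId, MAIN_FORKNUM);	/* "base/16384/16385" */
GetRelationPath(16384, DEFAULTTABLESPACE_OID, 16385,
				InvalidBackendId, FSM_FORKNUM);		/* "base/16384/16385_fsm" */
GetRelationPath(16384, DEFAULTTABLESPACE_OID, 16385,
				3, MAIN_FORKNUM);					/* "base/16384/t3_16385" */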

View File

@ -18,7 +18,7 @@
#include "lib/stringinfo.h"
#include "storage/bufpage.h"
#include "storage/itemptr.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"

View File

@ -110,7 +110,7 @@ typedef struct
typedef struct ginxlogSplit
{
RelFileNode node;
RelFileLocator locator;
BlockNumber rrlink; /* right link, or root's blocknumber if root
* split */
BlockNumber leftChildBlkno; /* valid on a non-leaf split */
@ -167,7 +167,7 @@ typedef struct ginxlogDeletePage
*/
typedef struct ginxlogUpdateMeta
{
RelFileNode node;
RelFileLocator locator;
GinMetaPageData metadata;
BlockNumber prevTail;
BlockNumber newRightlink;

View File

@ -97,7 +97,7 @@ typedef struct gistxlogPageDelete
*/
typedef struct gistxlogPageReuse
{
RelFileNode node;
RelFileLocator locator;
BlockNumber block;
FullTransactionId latestRemovedFullXid;
} gistxlogPageReuse;

View File

@ -19,7 +19,7 @@
#include "lib/stringinfo.h"
#include "storage/buf.h"
#include "storage/bufpage.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
@ -370,9 +370,9 @@ typedef struct xl_heap_new_cid
CommandId combocid; /* just for debugging */
/*
* Store the relfilenode/ctid pair to facilitate lookups.
* Store the relfilelocator/ctid pair to facilitate lookups.
*/
RelFileNode target_node;
RelFileLocator target_locator;
ItemPointerData target_tid;
} xl_heap_new_cid;
@ -415,7 +415,7 @@ extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
MultiXactId *relminmxid_out);
extern void heap_execute_freeze_tuple(HeapTupleHeader tuple,
xl_heap_freeze_tuple *xlrec_tp);
extern XLogRecPtr log_heap_visible(RelFileNode rnode, Buffer heap_buffer,
extern XLogRecPtr log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer,
Buffer vm_buffer, TransactionId cutoff_xid, uint8 flags);
#endif /* HEAPAM_XLOG_H */

View File

@ -180,12 +180,12 @@ typedef struct xl_btree_dedup
* This is what we need to know about page reuse within btree. This record
* only exists to generate a conflict point for Hot Standby.
*
* Note that we must include a RelFileNode in the record because we don't
* Note that we must include a RelFileLocator in the record because we don't
* actually register the buffer with the record.
*/
typedef struct xl_btree_reuse_page
{
RelFileNode node;
RelFileLocator locator;
BlockNumber block;
FullTransactionId latestRemovedFullXid;
} xl_btree_reuse_page;
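
On the redo side, the record's locator and XID feed straight into recovery-conflict resolution; a simplified sketch of the consumer in nbtxlog.c, assuming the post-rename field names:

static void
btree_xlog_reuse_page(XLogReaderState *record)
{
	xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record);

	if (InHotStandby)
		ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
												   xlrec->locator);
}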

View File

@ -15,7 +15,7 @@
#include "access/htup.h"
#include "storage/itemptr.h"
#include "storage/relfilenode.h"
#include "storage/relfilelocator.h"
#include "utils/relcache.h"
/* struct definition is private to rewriteheap.c */
@ -34,8 +34,8 @@ extern bool rewrite_heap_dead_tuple(RewriteState state, HeapTuple oldTuple);
*/
typedef struct LogicalRewriteMappingData
{
RelFileNode old_node;
RelFileNode new_node;
RelFileLocator old_locator;
RelFileLocator new_locator;
ItemPointerData old_tid;
ItemPointerData new_tid;
} LogicalRewriteMappingData;
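
A hypothetical consumer, to show how an entry is meant to be read: the tuple that lived at (old_locator, old_tid) before the heap rewrite now lives at (new_locator, new_tid). The function name below is invented for illustration:

static bool
follow_mapping(LogicalRewriteMappingData *map,
			   RelFileLocator locator, ItemPointerData tid,
			   RelFileLocator *new_locator, ItemPointerData *new_tid)
{
	if (RelFileLocatorEquals(map->old_locator, locator) &&
		ItemPointerEquals(&map->old_tid, &tid))
	{
		*new_locator = map->new_locator;
		*new_tid = map->new_tid;
		return true;
	}
	return false;
}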

Some files were not shown because too many files have changed in this diff.