Initial pgindent and pgperltidy run for v13.

Includes some manual cleanup of places that pgindent messed up,
most of which weren't per project style anyway.

Notably, it seems some people didn't absorb the style rules of
commit c9d297751, because there were a bunch of new occurrences
of function calls with a newline just after the left paren, all
with faulty expectations about how the rest of the call would get
indented.
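
To make the style point concrete, here is a minimal, hypothetical C
sketch (invented for illustration, not taken from the commit; the
function add3 is made up). pgindent tends to line continuation
arguments up under the first argument of a call, so a newline placed
directly after the left paren does not stay where its author put it:

static int
add3(int a, int b, int c)
{
	return a + b + c;
}

int
main(void)
{
	int			sum;

	/*
	 * Discouraged: newline directly after the left paren. pgindent
	 * tends to re-flow this continuation, so the layout the author
	 * chose does not survive the next indent run.
	 */
	sum = add3(
			   1, 2, 3);

	/*
	 * Project style: first argument on the same line as the paren,
	 * remaining arguments aligned under it. pgindent preserves this.
	 */
	sum = add3(1,
			   2,
			   3);

	return sum;
}
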
Tom Lane 2020-05-14 13:06:38 -04:00
parent 1255466f83
commit 5cbfce562f
198 changed files with 2019 additions and 1786 deletions

@@ -217,7 +217,7 @@ Datum
pg_file_sync(PG_FUNCTION_ARGS)
{
char *filename;
struct stat fst;
struct stat fst;
filename = convert_and_check_filename(PG_GETARG_TEXT_PP(0));

@@ -256,7 +256,7 @@ checkcondition_arr(void *checkval, ITEM *item, void *options)
static bool
checkcondition_bit(void *checkval, ITEM *item, void *siglen)
{
return GETBIT(checkval, HASHVAL(item->val, (int)(intptr_t) siglen));
return GETBIT(checkval, HASHVAL(item->val, (int) (intptr_t) siglen));
}
/*
@@ -300,7 +300,7 @@ bool
signconsistent(QUERYTYPE *query, BITVECP sign, int siglen, bool calcnot)
{
return execute(GETQUERY(query) + query->size - 1,
(void *) sign, (void *)(intptr_t) siglen, calcnot,
(void *) sign, (void *) (intptr_t) siglen, calcnot,
checkcondition_bit);
}

@@ -407,8 +407,8 @@ gist_te(ltree_gist *key, ltree *query, int siglen)
typedef struct LtreeSignature
{
BITVECP sign;
int siglen;
BITVECP sign;
int siglen;
} LtreeSignature;
static bool

@@ -272,7 +272,7 @@ typedef struct
#define LTG_GETRNODE(x, siglen) ( LTG_ISONENODE(x) ? LTG_NODE(x) : LTG_RNODE(x, siglen) )
extern ltree_gist *ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
ltree *left, ltree *right);
ltree *left, ltree *right);
/* GiST support for ltree[] */

@@ -40,7 +40,7 @@ ltree_gist_alloc(bool isalltrue, BITVECP sign, int siglen,
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
(left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
(left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
@@ -557,8 +557,8 @@ gist_between(ltree_gist *key, lquery *query, int siglen)
typedef struct LtreeSignature
{
BITVECP sign;
int siglen;
BITVECP sign;
int siglen;
} LtreeSignature;
static bool

@@ -2681,6 +2681,7 @@ JumbleRowMarks(pgssJumbleState *jstate, List *rowMarks)
foreach(lc, rowMarks)
{
RowMarkClause *rowmark = lfirst_node(RowMarkClause, lc);
if (!rowmark->pushedDown)
{
APP_JUMB(rowmark->rti);

@@ -384,7 +384,7 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
Oid relid = PG_GETARG_OID(0);
Relation rel;
ForkNumber fork;
BlockNumber block;
BlockNumber block;
rel = relation_open(relid, AccessExclusiveLock);

@@ -303,8 +303,8 @@ connect_pg_server(ForeignServer *server, UserMapping *user)
/*
* Check that non-superuser has used password to establish connection;
* otherwise, he's piggybacking on the postgres server's user
* identity. See also dblink_security_check() in contrib/dblink
* and check_conn_params.
* identity. See also dblink_security_check() in contrib/dblink and
* check_conn_params.
*/
if (!superuser_arg(user->userid) && UserMappingPasswordRequired(user) &&
!PQconnectionUsedPassword(conn))
@@ -361,6 +361,7 @@ UserMappingPasswordRequired(UserMapping *user)
foreach(cell, user->options)
{
DefElem *def = (DefElem *) lfirst(cell);
if (strcmp(def->defname, "password_required") == 0)
return defGetBoolean(def);
}

@@ -144,13 +144,13 @@ postgres_fdw_validator(PG_FUNCTION_ARGS)
}
else if (strcmp(def->defname, "password_required") == 0)
{
bool pw_required = defGetBoolean(def);
bool pw_required = defGetBoolean(def);
/*
* Only the superuser may set this option on a user mapping, or
* alter a user mapping on which this option is set. We allow a
* user to clear this option if it's set - in fact, we don't have a
* choice since we can't see the old mapping when validating an
* user to clear this option if it's set - in fact, we don't have
* a choice since we can't see the old mapping when validating an
* alter.
*/
if (!superuser() && !pw_required)
@@ -204,11 +204,11 @@ InitPgFdwOptions(void)
{"fetch_size", ForeignServerRelationId, false},
{"fetch_size", ForeignTableRelationId, false},
{"password_required", UserMappingRelationId, false},
/*
* sslcert and sslkey are in fact libpq options, but we repeat them
* here to allow them to appear in both foreign server context
* (when we generate libpq options) and user mapping context
* (from here).
* here to allow them to appear in both foreign server context (when
* we generate libpq options) and user mapping context (from here).
*/
{"sslcert", UserMappingRelationId, true},
{"sslkey", UserMappingRelationId, true},

@@ -55,8 +55,10 @@ while (<$feat>)
print " <entry>$feature_id</entry>\n";
}
print " <entry>",
defined($feature_packages{$feature_id}) ? $feature_packages{$feature_id} : "",
"</entry>\n";
defined($feature_packages{$feature_id})
? $feature_packages{$feature_id}
: "",
"</entry>\n";
if ($subfeature_id)
{
print " <entry>$subfeature_name</entry>\n";

@@ -201,7 +201,7 @@ detoast_attr(struct varlena *attr)
*/
struct varlena *
detoast_attr_slice(struct varlena *attr,
int32 sliceoffset, int32 slicelength)
int32 sliceoffset, int32 slicelength)
{
struct varlena *preslice;
struct varlena *result;
@@ -220,12 +220,12 @@ detoast_attr_slice(struct varlena *attr,
/*
* For compressed values, we need to fetch enough slices to decompress
* at least the requested part (when a prefix is requested). Otherwise,
* just fetch all slices.
* at least the requested part (when a prefix is requested).
* Otherwise, just fetch all slices.
*/
if (slicelength > 0 && sliceoffset >= 0)
{
int32 max_size;
int32 max_size;
/*
* Determine maximum amount of compressed data needed for a prefix
@@ -253,7 +253,7 @@ detoast_attr_slice(struct varlena *attr,
Assert(!VARATT_IS_EXTERNAL_INDIRECT(redirect.pointer));
return detoast_attr_slice(redirect.pointer,
sliceoffset, slicelength);
sliceoffset, slicelength);
}
else if (VARATT_IS_EXTERNAL_EXPANDED(attr))
{
@@ -343,7 +343,8 @@ toast_fetch_datum(struct varlena *attr)
SET_VARSIZE(result, attrsize + VARHDRSZ);
if (attrsize == 0)
return result; /* Probably shouldn't happen, but just in case. */
return result; /* Probably shouldn't happen, but just in
* case. */
/*
* Open the toast relation and its indexes
@@ -387,9 +388,9 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset,
VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);
/*
* It's nonsense to fetch slices of a compressed datum unless when it's
* a prefix -- this isn't lo_* we can't return a compressed datum which
* is meaningful to toast later.
* It's nonsense to fetch slices of a compressed datum unless when it's a
* prefix -- this isn't lo_* we can't return a compressed datum which is
* meaningful to toast later.
*/
Assert(!VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer) || 0 == sliceoffset);

@@ -1347,8 +1347,8 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
left->buf, right->buf, false, false))
{
/*
* If the parent page was split, the existing downlink might
* have moved.
* If the parent page was split, the existing downlink might have
* moved.
*/
stack->downlinkoffnum = InvalidOffsetNumber;
}
@@ -1370,9 +1370,10 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
tuples, 2,
stack->downlinkoffnum,
left->buf, right->buf,
true, /* Unlock parent */
unlockbuf /* Unlock stack->buffer if caller wants that */
))
true, /* Unlock parent */
unlockbuf /* Unlock stack->buffer if caller wants
* that */
))
{
/*
* If the parent page was split, the downlink might have moved.

@@ -144,6 +144,7 @@ _hash_spareindex(uint32 num_bucket)
{
uint32 splitpoint_group;
uint32 splitpoint_phases;
splitpoint_group = pg_ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)

@@ -318,7 +318,7 @@ check_hash_func_signature(Oid funcid, int16 amprocnum, Oid argtype)
argtype == XIDOID || argtype == CIDOID))
/* okay, allowed use of hashint4() */ ;
else if ((funcid == F_HASHINT8 || funcid == F_HASHINT8EXTENDED) &&
(argtype == XID8OID))
(argtype == XID8OID))
/* okay, allowed use of hashint8() */ ;
else if ((funcid == F_TIMESTAMP_HASH ||
funcid == F_TIMESTAMP_HASH_EXTENDED) &&

@@ -2153,8 +2153,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
/*
* Note that heap_multi_insert is not used for catalog tuples yet,
* but this will cover the gap once that is the case.
* Note that heap_multi_insert is not used for catalog tuples yet, but
* this will cover the gap once that is the case.
*/
if (needwal && need_cids)
log_heap_new_cid(relation, heaptuples[ndone]);

@@ -943,7 +943,7 @@ index_opclass_options(Relation indrel, AttrNumber attnum, Datum attoptions,
/* fetch options support procedure if specified */
if (amoptsprocnum != 0)
procid =index_getprocid(indrel, attnum, amoptsprocnum);
procid = index_getprocid(indrel, attnum, amoptsprocnum);
if (!OidIsValid(procid))
{
@@ -953,7 +953,7 @@ index_opclass_options(Relation indrel, AttrNumber attnum, Datum attoptions,
bool isnull;
if (!DatumGetPointer(attoptions))
return NULL; /* ok, no options, no procedure */
return NULL; /* ok, no options, no procedure */
/*
* Report an error if the opclass's options-parsing procedure does not

@@ -1566,7 +1566,8 @@ _bt_pagedel(Relation rel, Buffer leafbuf, TransactionId *oldestBtpoXact)
BTScanInsert itup_key;
ItemId itemid;
IndexTuple targetkey;
BlockNumber leftsib, leafblkno;
BlockNumber leftsib,
leafblkno;
Buffer sleafbuf;
itemid = PageGetItemId(page, P_HIKEY);
@@ -1777,6 +1778,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
#ifdef USE_ASSERT_CHECKING
/*
* This is just an assertion because _bt_lock_subtree_parent should have
* guaranteed tuple has the expected contents
@@ -2368,7 +2370,8 @@ _bt_lock_subtree_parent(Relation rel, BlockNumber child, BTStack stack,
Buffer *subtreeparent, OffsetNumber *poffset,
BlockNumber *topparent, BlockNumber *topparentrightsib)
{
BlockNumber parent, leftsibparent;
BlockNumber parent,
leftsibparent;
OffsetNumber parentoffset,
maxoff;
Buffer pbuf;
@@ -2439,9 +2442,9 @@ _bt_lock_subtree_parent(Relation rel, BlockNumber child, BTStack stack,
/*
* Now make sure that the parent deletion is itself safe by examining the
* child's grandparent page. Recurse, passing the parent page as the
* child page (child's grandparent is the parent on the next level up).
* If parent deletion is unsafe, then child deletion must also be unsafe
* (in which case caller cannot delete any pages at all).
* child page (child's grandparent is the parent on the next level up). If
* parent deletion is unsafe, then child deletion must also be unsafe (in
* which case caller cannot delete any pages at all).
*/
*topparent = parent;
*topparentrightsib = opaque->btpo_next;

@@ -1091,7 +1091,8 @@ btvacuumpage(BTVacState *vstate, BlockNumber scanblkno)
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool attempt_pagedel;
BlockNumber blkno, backtrack_to;
BlockNumber blkno,
backtrack_to;
Buffer buf;
Page page;
BTPageOpaque opaque;

@@ -156,11 +156,10 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
/*
* We need to save the location of the pivot tuple we chose in the
* parent page on a stack. If we need to split a page, we'll use
* the stack to work back up to its parent page. If caller ends up
* splitting a page one level down, it usually ends up inserting a
* new pivot tuple/downlink immediately after the location recorded
* here.
* parent page on a stack. If we need to split a page, we'll use the
* stack to work back up to its parent page. If caller ends up
* splitting a page one level down, it usually ends up inserting a new
* pivot tuple/downlink immediately after the location recorded here.
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;

@@ -72,7 +72,7 @@ static bool _bt_afternewitemoff(FindSplitData *state, OffsetNumber maxoff,
static bool _bt_adjacenthtid(ItemPointer lowhtid, ItemPointer highhtid);
static OffsetNumber _bt_bestsplitloc(FindSplitData *state, int perfectpenalty,
bool *newitemonleft, FindSplitStrat strategy);
static int _bt_defaultinterval(FindSplitData *state);
static int _bt_defaultinterval(FindSplitData *state);
static int _bt_strategy(FindSplitData *state, SplitPoint *leftpage,
SplitPoint *rightpage, FindSplitStrat *strategy);
static void _bt_interval_edges(FindSplitData *state,

@@ -35,7 +35,7 @@ dbase_desc(StringInfo buf, XLogReaderState *record)
else if (info == XLOG_DBASE_DROP)
{
xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec;
int i;
int i;
appendStringInfo(buf, "dir");
for (i = 0; i < xlrec->ntablespaces; i++)

@@ -251,7 +251,7 @@ static void
xact_desc_relations(StringInfo buf, char *label, int nrels,
RelFileNode *xnodes)
{
int i;
int i;
if (nrels > 0)
{
@@ -269,7 +269,7 @@ xact_desc_relations(StringInfo buf, char *label, int nrels,
static void
xact_desc_subxacts(StringInfo buf, int nsubxacts, TransactionId *subxacts)
{
int i;
int i;
if (nsubxacts > 0)
{

@@ -275,7 +275,7 @@ spgvalidate(Oid opclassoid)
if ((thisgroup->functionset & (((uint64) 1) << i)) != 0)
continue; /* got it */
if (i == SPGIST_OPTIONS_PROC)
continue; /* optional method */
continue; /* optional method */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("operator family \"%s\" of access method %s is missing support function %d for type %s",

@@ -3750,7 +3750,7 @@ EndTransactionBlock(bool chain)
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
/* translator: %s represents an SQL statement name */
/* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"COMMIT AND CHAIN")));
else
@@ -3829,7 +3829,7 @@ EndTransactionBlock(bool chain)
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
/* translator: %s represents an SQL statement name */
/* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"COMMIT AND CHAIN")));
else
@@ -3952,7 +3952,7 @@ UserAbortTransactionBlock(bool chain)
if (chain)
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
/* translator: %s represents an SQL statement name */
/* translator: %s represents an SQL statement name */
errmsg("%s can only be used in transaction blocks",
"ROLLBACK AND CHAIN")));
else

@@ -6071,7 +6071,7 @@ recoveryApplyDelay(XLogReaderState *record)
{
uint8 xact_info;
TimestampTz xtime;
TimestampTz delayUntil;
TimestampTz delayUntil;
long secs;
int microsecs;
@@ -6341,7 +6341,11 @@ StartupXLOG(void)
switch (ControlFile->state)
{
case DB_SHUTDOWNED:
/* This is the expected case, so don't be chatty in standalone mode */
/*
* This is the expected case, so don't be chatty in standalone
* mode
*/
ereport(IsPostmasterEnvironment ? LOG : NOTICE,
(errmsg("database system was shut down at %s",
str_time(ControlFile->time))));
@@ -10691,8 +10695,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
datadirpathlen = strlen(DataDir);
/*
* Report that we are now estimating the total backup size
* if we're streaming base backup as requested by pg_basebackup
* Report that we are now estimating the total backup size if we're
* streaming base backup as requested by pg_basebackup
*/
if (tablespaces)
pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE,
@@ -11410,7 +11414,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
void
do_pg_abort_backup(int code, Datum arg)
{
bool emit_warning = DatumGetBool(arg);
bool emit_warning = DatumGetBool(arg);
/*
* Quick exit if session is not keeping around a non-exclusive backup
@@ -12154,8 +12158,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
*/
/*
* We should be able to move to XLOG_FROM_STREAM
* only in standby mode.
* We should be able to move to XLOG_FROM_STREAM only in
* standby mode.
*/
Assert(StandbyMode);
@@ -12242,6 +12246,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
{
case XLOG_FROM_ARCHIVE:
case XLOG_FROM_PG_WAL:
/*
* WAL receiver must not be running when reading WAL from
* archive or pg_wal.
@@ -12279,8 +12284,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
bool havedata;
/*
* We should be able to move to XLOG_FROM_STREAM
* only in standby mode.
* We should be able to move to XLOG_FROM_STREAM only in
* standby mode.
*/
Assert(StandbyMode);

@@ -64,8 +64,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
TimeLineID restartTli;
/*
* Ignore restore_command when not in archive recovery (meaning
* we are in crash recovery).
* Ignore restore_command when not in archive recovery (meaning we are in
* crash recovery).
*/
if (!ArchiveRecoveryRequested)
goto not_available;

@@ -1595,9 +1595,9 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
FullTransactionId
XLogRecGetFullXid(XLogReaderState *record)
{
TransactionId xid,
next_xid;
uint32 epoch;
TransactionId xid,
next_xid;
uint32 epoch;
/*
* This function is only safe during replay, because it depends on the
@@ -1610,8 +1610,8 @@ XLogRecGetFullXid(XLogReaderState *record)
epoch = EpochFromFullTransactionId(ShmemVariableCache->nextFullXid);
/*
* If xid is numerically greater than next_xid, it has to be from the
* last epoch.
* If xid is numerically greater than next_xid, it has to be from the last
* epoch.
*/
if (unlikely(xid > next_xid))
--epoch;

@@ -109,7 +109,7 @@ foreach my $header (@ARGV)
}
else
{
push @{ $catalog_data{pg_description}}, \%descr;
push @{ $catalog_data{pg_description} }, \%descr;
}
}
@@ -679,8 +679,8 @@ close $bki;
close $schemapg;
# Finally, rename the completed files into place.
Catalog::RenameTempFile($bkifile, $tmpext);
Catalog::RenameTempFile($schemafile, $tmpext);
Catalog::RenameTempFile($bkifile, $tmpext);
Catalog::RenameTempFile($schemafile, $tmpext);
exit 0;

@@ -3464,7 +3464,7 @@ restart:
*/
foreach(cell, parent_cons)
{
Oid parent = lfirst_oid(cell);
Oid parent = lfirst_oid(cell);
ScanKeyInit(&key,
Anum_pg_constraint_oid,
@@ -3487,9 +3487,9 @@
*
* Because of this arrangement, we can correctly catch all
* relevant relations by adding to 'parent_cons' all rows with
* valid conparentid, and to the 'oids' list all rows with a
* zero conparentid. If any oids are added to 'oids', redo the
* first loop above by setting 'restart'.
* valid conparentid, and to the 'oids' list all rows with a zero
* conparentid. If any oids are added to 'oids', redo the first
* loop above by setting 'restart'.
*/
if (OidIsValid(con->conparentid))
parent_cons = list_append_unique_oid(parent_cons,

@@ -43,13 +43,13 @@ ObjectAddress
CastCreate(Oid sourcetypeid, Oid targettypeid, Oid funcid, char castcontext,
char castmethod, DependencyType behavior)
{
Relation relation;
HeapTuple tuple;
Oid castid;
Datum values[Natts_pg_cast];
bool nulls[Natts_pg_cast];
ObjectAddress myself,
referenced;
Relation relation;
HeapTuple tuple;
Oid castid;
Datum values[Natts_pg_cast];
bool nulls[Natts_pg_cast];
ObjectAddress myself,
referenced;
relation = table_open(CastRelationId, RowExclusiveLock);

@@ -706,8 +706,8 @@ getAutoExtensionsOfObject(Oid classId, Oid objectId)
{
List *result = NIL;
Relation depRel;
ScanKeyData key[2];
SysScanDesc scan;
ScanKeyData key[2];
SysScanDesc scan;
HeapTuple tup;
depRel = table_open(DependRelationId, AccessShareLock);

@@ -1324,6 +1324,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
sdepForm->objid);
break;
case SHARED_DEPENDENCY_POLICY:
/*
* Try to remove role from policy; if unable to, remove
* policy.
@@ -1335,6 +1336,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
obj.classId = sdepForm->classid;
obj.objectId = sdepForm->objid;
obj.objectSubId = sdepForm->objsubid;
/*
* Acquire lock on object, then verify this dependency
* is still relevant. If not, the object might have

@@ -280,8 +280,8 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
bool vm;
bool need_fsm_vacuum = false;
ForkNumber forks[MAX_FORKNUM];
BlockNumber blocks[MAX_FORKNUM];
int nforks = 0;
BlockNumber blocks[MAX_FORKNUM];
int nforks = 0;
/* Open it at the smgr level if not already done */
RelationOpenSmgr(rel);
@@ -298,7 +298,7 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
blocks[nforks] = nblocks;
nforks++;
/* Prepare for truncation of the FSM if it exists */
/* Prepare for truncation of the FSM if it exists */
fsm = smgrexists(rel->rd_smgr, FSM_FORKNUM);
if (fsm)
{
@@ -367,9 +367,9 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
smgrtruncate(rel->rd_smgr, forks, nforks, blocks);
/*
* Update upper-level FSM pages to account for the truncation.
* This is important because the just-truncated pages were likely
* marked as all-free, and would be preferentially selected.
* Update upper-level FSM pages to account for the truncation. This is
* important because the just-truncated pages were likely marked as
* all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
@@ -923,8 +923,8 @@ smgr_redo(XLogReaderState *record)
SMgrRelation reln;
Relation rel;
ForkNumber forks[MAX_FORKNUM];
BlockNumber blocks[MAX_FORKNUM];
int nforks = 0;
BlockNumber blocks[MAX_FORKNUM];
int nforks = 0;
bool need_fsm_vacuum = false;
reln = smgropen(xlrec->rnode, InvalidBackendId);
@@ -995,9 +995,9 @@ smgr_redo(XLogReaderState *record)
smgrtruncate(reln, forks, nforks, blocks);
/*
* Update upper-level FSM pages to account for the truncation.
* This is important because the just-truncated pages were likely
* marked as all-free, and would be preferentially selected.
* Update upper-level FSM pages to account for the truncation. This is
* important because the just-truncated pages were likely marked as
* all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, xlrec->blkno,

@@ -470,7 +470,7 @@ ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddre
}
else
{
List *currexts;
List *currexts;
/* Avoid duplicates */
currexts = getAutoExtensionsOfObject(address.classId,

@@ -1947,11 +1947,11 @@ remove_dbtablespaces(Oid db_id)
Relation rel;
TableScanDesc scan;
HeapTuple tuple;
List *ltblspc = NIL;
ListCell *cell;
int ntblspc;
int i;
Oid *tablespace_ids;
List *ltblspc = NIL;
ListCell *cell;
int ntblspc;
int i;
Oid *tablespace_ids;
rel = table_open(TableSpaceRelationId, AccessShareLock);
scan = table_beginscan_catalog(rel, 0, NULL);

@@ -72,12 +72,6 @@ typedef struct EventTriggerQueryState
static EventTriggerQueryState *currentEventTriggerState = NULL;
typedef struct
{
const char *obtypename;
bool supported;
} event_trigger_support_data;
/* Support for dropped objects */
typedef struct SQLDropObject
{

@@ -2886,8 +2886,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
* we don't need to do anything if there were 0 full groups.
*
* We still have to continue after this block if there are no full groups,
* though, since it's possible that we have workers that did real work even
* if the leader didn't participate.
* though, since it's possible that we have workers that did real work
* even if the leader didn't participate.
*/
if (fullsortGroupInfo->groupCount > 0)
{
@@ -2914,8 +2914,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
&incrsortstate->shared_info->sinfo[n];
/*
* If a worker hasn't processed any sort groups at all, then exclude
* it from output since it either didn't launch or didn't
* If a worker hasn't processed any sort groups at all, then
* exclude it from output since it either didn't launch or didn't
* contribute anything meaningful.
*/
fullsortGroupInfo = &incsort_info->fullsortGroupInfo;
@@ -2923,8 +2923,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
/*
* Since we never have any prefix groups unless we've first sorted
* a full groups and transitioned modes (copying the tuples into a
* prefix group), we don't need to do anything if there were 0 full
* groups.
* prefix group), we don't need to do anything if there were 0
* full groups.
*/
if (fullsortGroupInfo->groupCount == 0)
continue;
@@ -3048,8 +3048,8 @@ show_hash_info(HashState *hashstate, ExplainState *es)
static void
show_hashagg_info(AggState *aggstate, ExplainState *es)
{
Agg *agg = (Agg *)aggstate->ss.ps.plan;
int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
Agg *agg = (Agg *) aggstate->ss.ps.plan;
int64 memPeakKb = (aggstate->hash_mem_peak + 1023) / 1024;
Assert(IsA(aggstate, AggState));

@@ -1402,39 +1402,39 @@ CreateExtensionInternal(char *extensionName,
* does what is needed, we try to find a sequence of update scripts that
* will get us there.
*/
filename = get_extension_script_filename(pcontrol, NULL, versionName);
if (stat(filename, &fst) == 0)
{
/* Easy, no extra scripts */
updateVersions = NIL;
}
else
{
/* Look for best way to install this version */
List *evi_list;
ExtensionVersionInfo *evi_start;
ExtensionVersionInfo *evi_target;
filename = get_extension_script_filename(pcontrol, NULL, versionName);
if (stat(filename, &fst) == 0)
{
/* Easy, no extra scripts */
updateVersions = NIL;
}
else
{
/* Look for best way to install this version */
List *evi_list;
ExtensionVersionInfo *evi_start;
ExtensionVersionInfo *evi_target;
/* Extract the version update graph from the script directory */
evi_list = get_ext_ver_list(pcontrol);
/* Extract the version update graph from the script directory */
evi_list = get_ext_ver_list(pcontrol);
/* Identify the target version */
evi_target = get_ext_ver_info(versionName, &evi_list);
/* Identify the target version */
evi_target = get_ext_ver_info(versionName, &evi_list);
/* Identify best path to reach target */
evi_start = find_install_path(evi_list, evi_target,
&updateVersions);
/* Identify best path to reach target */
evi_start = find_install_path(evi_list, evi_target,
&updateVersions);
/* Fail if no path ... */
if (evi_start == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"",
pcontrol->name, versionName)));
/* Fail if no path ... */
if (evi_start == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("extension \"%s\" has no installation script nor update path for version \"%s\"",
pcontrol->name, versionName)));
/* Otherwise, install best starting point and then upgrade */
versionName = evi_start->name;
}
/* Otherwise, install best starting point and then upgrade */
versionName = evi_start->name;
}
/*
* Fetch control parameters for installation target version

@@ -1417,7 +1417,7 @@ CreateCast(CreateCastStmt *stmt)
char castmethod;
HeapTuple tuple;
AclResult aclresult;
ObjectAddress myself;
ObjectAddress myself;
sourcetypeid = typenameTypeId(NULL, stmt->sourcetype);
targettypeid = typenameTypeId(NULL, stmt->targettype);

@@ -338,7 +338,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opfamilyoid, /* oid of containing opfamily */
opclassoid; /* oid of opclass we create */
int maxOpNumber, /* amstrategies value */
optsProcNumber, /* amoptsprocnum value */
optsProcNumber, /* amoptsprocnum value */
maxProcNumber; /* amsupport value */
bool amstorage; /* amstorage flag */
List *operators; /* OpFamilyMember list for operators */
@@ -779,7 +779,7 @@ AlterOpFamily(AlterOpFamilyStmt *stmt)
Oid amoid, /* our AM's oid */
opfamilyoid; /* oid of opfamily */
int maxOpNumber, /* amstrategies value */
optsProcNumber, /* amopclassopts value */
optsProcNumber, /* amopclassopts value */
maxProcNumber; /* amsupport value */
HeapTuple tup;
Form_pg_am amform;
@@ -1252,6 +1252,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid,
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("btree equal image functions must return boolean")));
/*
* pg_amproc functions are indexed by (lefttype, righttype), but
* an equalimage function can only be called at CREATE INDEX time.

@@ -322,8 +322,8 @@ AlterPublicationOptions(AlterPublicationStmt *stmt, Relation rel,
* invalidate all partitions contained in the respective partition
* trees, not just those explicitly mentioned in the publication.
*/
List *relids = GetPublicationRelations(pubform->oid,
PUBLICATION_PART_ALL);
List *relids = GetPublicationRelations(pubform->oid,
PUBLICATION_PART_ALL);
/*
* We don't want to send too many individual messages, at some point
@@ -380,8 +380,8 @@ AlterPublicationTables(AlterPublicationStmt *stmt, Relation rel,
PublicationDropTables(pubid, rels, false);
else /* DEFELEM_SET */
{
List *oldrelids = GetPublicationRelations(pubid,
PUBLICATION_PART_ROOT);
List *oldrelids = GetPublicationRelations(pubid,
PUBLICATION_PART_ROOT);
List *delrels = NIL;
ListCell *oldlc;

@@ -431,7 +431,7 @@ AlterStatistics(AlterStatsStmt *stmt)
Datum repl_val[Natts_pg_statistic_ext];
bool repl_null[Natts_pg_statistic_ext];
bool repl_repl[Natts_pg_statistic_ext];
ObjectAddress address;
ObjectAddress address;
int newtarget = stmt->stxstattarget;
/* Limit statistics target to a sane range */
@@ -455,9 +455,9 @@ AlterStatistics(AlterStatsStmt *stmt)
stxoid = get_statistics_object_oid(stmt->defnames, stmt->missing_ok);
/*
* If we got here and the OID is not valid, it means the statistics
* does not exist, but the command specified IF EXISTS. So report
* this as a simple NOTICE and we're done.
* If we got here and the OID is not valid, it means the statistics does
* not exist, but the command specified IF EXISTS. So report this as a
* simple NOTICE and we're done.
*/
if (!OidIsValid(stxoid))
{

@@ -177,7 +177,7 @@ typedef struct AlteredTableInfo
List *changedIndexOids; /* OIDs of indexes to rebuild */
List *changedIndexDefs; /* string definitions of same */
char *replicaIdentityIndex; /* index to reset as REPLICA IDENTITY */
char *clusterOnIndex; /* index to use for CLUSTER */
char *clusterOnIndex; /* index to use for CLUSTER */
} AlteredTableInfo;
/* Struct describing one new constraint to check in Phase 3 scan */
@@ -1265,9 +1265,9 @@ RemoveRelations(DropStmt *drop)
if (drop->concurrent)
{
/*
* Note that for temporary relations this lock may get upgraded
* later on, but as no other session can access a temporary
* relation, this is actually fine.
* Note that for temporary relations this lock may get upgraded later
* on, but as no other session can access a temporary relation, this
* is actually fine.
*/
lockmode = ShareUpdateExclusiveLock;
Assert(drop->removeType == OBJECT_INDEX);
@@ -1620,10 +1620,10 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
* Inherited TRUNCATE commands perform access
* permission checks on the parent table only.
* So we skip checking the children's permissions
* and don't call truncate_check_perms() here.
* Inherited TRUNCATE commands perform access permission
* checks on the parent table only. So we skip checking the
* children's permissions and don't call
* truncate_check_perms() here.
*/
truncate_check_rel(RelationGetRelid(rel), rel->rd_rel);
truncate_check_activity(rel);
@@ -2650,6 +2650,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
errmsg("column \"%s\" inherits from generated column but specifies identity",
def->colname)));
}
/*
* If the parent column is not generated, then take whatever
* the child column definition says.
@@ -7500,8 +7501,8 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc
*/
foreach(lc, RelationGetIndexList(rel))
{
Oid indexoid = lfirst_oid(lc);
Relation indrel;
Oid indexoid = lfirst_oid(lc);
Relation indrel;
AttrNumber indattnum = 0;
indrel = index_open(indexoid, lockmode);
@@ -16993,7 +16994,7 @@ static void
DropClonedTriggersFromPartition(Oid partitionId)
{
ScanKeyData skey;
SysScanDesc scan;
SysScanDesc scan;
HeapTuple trigtup;
Relation tgrel;
ObjectAddresses *objects;

@@ -2240,8 +2240,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
/*
* After a tuple in a partition goes through a trigger, the user
* could have changed the partition key enough that the tuple
* no longer fits the partition. Verify that.
* could have changed the partition key enough that the tuple no
* longer fits the partition. Verify that.
*/
if (trigger->tgisclone &&
!ExecPartitionCheck(relinfo, slot, estate, false))

@@ -3238,7 +3238,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
bool nullcheck)
{
ExprContext *aggcontext;
int adjust_jumpnull = -1;
int adjust_jumpnull = -1;
if (ishash)
aggcontext = aggstate->hashcontext;

@@ -167,15 +167,16 @@ static Datum ExecJustAssignOuterVarVirt(ExprState *state, ExprContext *econtext,
static Datum ExecJustAssignScanVarVirt(ExprState *state, ExprContext *econtext, bool *isnull);
/* execution helper functions */
static pg_attribute_always_inline void
ExecAggPlainTransByVal(AggState *aggstate, AggStatePerTrans pertrans,
AggStatePerGroup pergroup,
ExprContext *aggcontext, int setno);
static pg_attribute_always_inline void
ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
AggStatePerGroup pergroup,
ExprContext *aggcontext, int setno);
static pg_attribute_always_inline void ExecAggPlainTransByVal(AggState *aggstate,
AggStatePerTrans pertrans,
AggStatePerGroup pergroup,
ExprContext *aggcontext,
int setno);
static pg_attribute_always_inline void ExecAggPlainTransByRef(AggState *aggstate,
AggStatePerTrans pertrans,
AggStatePerGroup pergroup,
ExprContext *aggcontext,
int setno);
/*
* Prepare ExprState for interpreted execution.
@@ -1611,8 +1612,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
EEO_CASE(EEOP_AGG_PLAIN_PERGROUP_NULLCHECK)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerGroup pergroup_allaggs = aggstate->all_pergroups
[op->d.agg_plain_pergroup_nullcheck.setoff];
AggStatePerGroup pergroup_allaggs =
aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
@@ -1636,9 +1637,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1665,9 +1665,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1684,9 +1683,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@@ -1702,9 +1700,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -1724,9 +1721,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -1742,9 +1738,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@@ -4302,21 +4297,20 @@ ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
newVal = FunctionCallInvoke(fcinfo);
/*
* For pass-by-ref datatype, must copy the new value into
* aggcontext and free the prior transValue. But if transfn
* returned a pointer to its first input, we don't need to do
* anything. Also, if transfn returned a pointer to a R/W
* expanded object that is already a child of the aggcontext,
* assume we can adopt that value without copying it.
* For pass-by-ref datatype, must copy the new value into aggcontext and
* free the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything. Also, if transfn returned a
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
* It's safe to compare newVal with pergroup->transValue without
* regard for either being NULL, because ExecAggTransReparent()
* takes care to set transValue to 0 when NULL. Otherwise we could
* end up accidentally not reparenting, when the transValue has
* the same numerical value as newValue, despite being NULL. This
* is a somewhat hot path, making it undesirable to instead solve
* this with another branch for the common case of the transition
* function returning its (modified) input argument.
* It's safe to compare newVal with pergroup->transValue without regard
* for either being NULL, because ExecAggTransReparent() takes care to set
* transValue to 0 when NULL. Otherwise we could end up accidentally not
* reparenting, when the transValue has the same numerical value as
* newValue, despite being NULL. This is a somewhat hot path, making it
* undesirable to instead solve this with another branch for the common
* case of the transition function returning its (modified) input
* argument.
*/
if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue))
newVal = ExecAggTransReparent(aggstate, pertrans,

@@ -300,9 +300,9 @@ TupleHashEntry
LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew)
{
TupleHashEntry entry;
MemoryContext oldContext;
uint32 hash;
TupleHashEntry entry;
MemoryContext oldContext;
uint32 hash;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);
@@ -326,8 +326,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
uint32
TupleHashTableHash(TupleHashTable hashtable, TupleTableSlot *slot)
{
MemoryContext oldContext;
uint32 hash;
MemoryContext oldContext;
uint32 hash;
hashtable->inputslot = slot;
hashtable->in_hash_funcs = hashtable->tab_hash_funcs;
@@ -350,8 +350,8 @@ TupleHashEntry
LookupTupleHashEntryHash(TupleHashTable hashtable, TupleTableSlot *slot,
bool *isnew, uint32 hash)
{
TupleHashEntry entry;
MemoryContext oldContext;
TupleHashEntry entry;
MemoryContext oldContext;
/* Need to run the hash functions in short-lived context */
oldContext = MemoryContextSwitchTo(hashtable->tempcxt);

@@ -259,7 +259,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (first_time)
{
MemoryContext oldcontext =
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
@@ -289,7 +289,7 @@ ExecMakeTableFunctionResult(SetExprState *setexpr,
if (tupdesc == NULL)
{
MemoryContext oldcontext =
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/*
* This is the first non-NULL result from the
@@ -384,7 +384,7 @@ no_function_result:
if (rsinfo.setResult == NULL)
{
MemoryContext oldcontext =
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;

@@ -320,9 +320,9 @@ CreateExprContext(EState *estate)
ExprContext *
CreateWorkExprContext(EState *estate)
{
Size minContextSize = ALLOCSET_DEFAULT_MINSIZE;
Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
Size minContextSize = ALLOCSET_DEFAULT_MINSIZE;
Size initBlockSize = ALLOCSET_DEFAULT_INITSIZE;
Size maxBlockSize = ALLOCSET_DEFAULT_MAXSIZE;
/* choose the maxBlockSize to be no larger than 1/16 of work_mem */
while (16 * maxBlockSize > work_mem * 1024L)

@@ -317,11 +317,11 @@
*/
typedef struct HashTapeInfo
{
LogicalTapeSet *tapeset;
int ntapes;
int *freetapes;
int nfreetapes;
int freetapes_alloc;
LogicalTapeSet *tapeset;
int ntapes;
int *freetapes;
int nfreetapes;
int freetapes_alloc;
} HashTapeInfo;
/*
@@ -336,11 +336,11 @@ typedef struct HashTapeInfo
typedef struct HashAggSpill
{
LogicalTapeSet *tapeset; /* borrowed reference to tape set */
int npartitions; /* number of partitions */
int *partitions; /* spill partition tape numbers */
int64 *ntuples; /* number of tuples in each partition */
uint32 mask; /* mask to find partition from hash value */
int shift; /* after masking, shift by this amount */
int npartitions; /* number of partitions */
int *partitions; /* spill partition tape numbers */
int64 *ntuples; /* number of tuples in each partition */
uint32 mask; /* mask to find partition from hash value */
int shift; /* after masking, shift by this amount */
} HashAggSpill;
/*
@@ -354,11 +354,11 @@ typedef struct HashAggSpill
*/
typedef struct HashAggBatch
{
int setno; /* grouping set */
int used_bits; /* number of bits of hash already used */
LogicalTapeSet *tapeset; /* borrowed reference to tape set */
int input_tapenum; /* input partition tape */
int64 input_tuples; /* number of tuples in this batch */
int setno; /* grouping set */
int used_bits; /* number of bits of hash already used */
LogicalTapeSet *tapeset; /* borrowed reference to tape set */
int input_tapenum; /* input partition tape */
int64 input_tuples; /* number of tuples in this batch */
} HashAggBatch;
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
@@ -402,10 +402,10 @@ static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
static long hash_choose_num_buckets(double hashentrysize,
long estimated_nbuckets,
Size memory);
static int hash_choose_num_partitions(uint64 input_groups,
double hashentrysize,
int used_bits,
int *log2_npartittions);
static int hash_choose_num_partitions(uint64 input_groups,
double hashentrysize,
int used_bits,
int *log2_npartittions);
static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash,
bool *in_hash_table);
static void lookup_hash_entries(AggState *aggstate);
@@ -786,14 +786,14 @@ advance_transition_function(AggState *aggstate,
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
* It's safe to compare newVal with pergroup->transValue without
* regard for either being NULL, because ExecAggTransReparent()
* takes care to set transValue to 0 when NULL. Otherwise we could
* end up accidentally not reparenting, when the transValue has
* the same numerical value as newValue, despite being NULL. This
* is a somewhat hot path, making it undesirable to instead solve
* this with another branch for the common case of the transition
* function returning its (modified) input argument.
* It's safe to compare newVal with pergroup->transValue without regard
* for either being NULL, because ExecAggTransReparent() takes care to set
* transValue to 0 when NULL. Otherwise we could end up accidentally not
* reparenting, when the transValue has the same numerical value as
* newValue, despite being NULL. This is a somewhat hot path, making it
* undesirable to instead solve this with another branch for the common
* case of the transition function returning its (modified) input
* argument.
*/
if (!pertrans->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
@@ -1206,7 +1206,7 @@ prepare_hash_slot(AggState *aggstate)
TupleTableSlot *inputslot = aggstate->tmpcontext->ecxt_outertuple;
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
int i;
int i;
/* transfer just the needed columns into hashslot */
slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
@@ -1438,13 +1438,13 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
static void
build_hash_tables(AggState *aggstate)
{
int setno;
int setno;
for (setno = 0; setno < aggstate->num_hashes; ++setno)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
long nbuckets;
Size memory;
long nbuckets;
Size memory;
if (perhash->hashtable != NULL)
{
@@ -1457,8 +1457,9 @@ build_hash_tables(AggState *aggstate)
memory = aggstate->hash_mem_limit / aggstate->num_hashes;
/* choose reasonable number of buckets per hashtable */
nbuckets = hash_choose_num_buckets(
aggstate->hashentrysize, perhash->aggnode->numGroups, memory);
nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
perhash->aggnode->numGroups,
memory);
build_hash_table(aggstate, setno, nbuckets);
}
@@ -1473,10 +1474,10 @@ static void
build_hash_table(AggState *aggstate, int setno, long nbuckets)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
MemoryContext metacxt = aggstate->hash_metacxt;
MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
Size additionalsize;
MemoryContext metacxt = aggstate->hash_metacxt;
MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
Size additionalsize;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
@@ -1489,20 +1490,19 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
*/
additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
perhash->hashtable = BuildTupleHashTableExt(
&aggstate->ss.ps,
perhash->hashslot->tts_tupleDescriptor,
perhash->numCols,
perhash->hashGrpColIdxHash,
perhash->eqfuncoids,
perhash->hashfunctions,
perhash->aggnode->grpCollations,
nbuckets,
additionalsize,
metacxt,
hashcxt,
tmpcxt,
DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
perhash->hashslot->tts_tupleDescriptor,
perhash->numCols,
perhash->hashGrpColIdxHash,
perhash->eqfuncoids,
perhash->hashfunctions,
perhash->aggnode->grpCollations,
nbuckets,
additionalsize,
metacxt,
hashcxt,
tmpcxt,
DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
}
/*
@@ -1648,12 +1648,12 @@ find_hash_columns(AggState *aggstate)
Size
hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
{
Size tupleChunkSize;
Size pergroupChunkSize;
Size transitionChunkSize;
Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
tupleWidth);
Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
Size tupleChunkSize;
Size pergroupChunkSize;
Size transitionChunkSize;
Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
tupleWidth);
Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
tupleChunkSize = CHUNKHDRSZ + tupleSize;
@@ -1695,24 +1695,24 @@ hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
static void
hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
{
AggStatePerPhase phase;
int i = minslot ? 1 : 0;
int j = nullcheck ? 1 : 0;
AggStatePerPhase phase;
int i = minslot ? 1 : 0;
int j = nullcheck ? 1 : 0;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
if (aggstate->aggstrategy == AGG_HASHED)
phase = &aggstate->phases[0];
else /* AGG_MIXED */
else /* AGG_MIXED */
phase = &aggstate->phases[1];
if (phase->evaltrans_cache[i][j] == NULL)
{
const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
bool outerfixed = aggstate->ss.ps.outeropsfixed;
bool dohash = true;
bool dosort;
const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
bool outerfixed = aggstate->ss.ps.outeropsfixed;
bool dohash = true;
bool dosort;
dosort = aggstate->aggstrategy == AGG_MIXED ? true : false;
@@ -1723,8 +1723,9 @@ hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
aggstate->ss.ps.outeropsfixed = true;
}
phase->evaltrans_cache[i][j] = ExecBuildAggTrans(
aggstate, phase, dosort, dohash, nullcheck);
phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
dosort, dohash,
nullcheck);
/* change back */
aggstate->ss.ps.outerops = outerops;
@@ -1747,8 +1748,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
Size *mem_limit, uint64 *ngroups_limit,
int *num_partitions)
{
int npartitions;
Size partition_mem;
int npartitions;
Size partition_mem;
/* if not expected to spill, use all of work_mem */
if (input_groups * hashentrysize < work_mem * 1024L)
@@ -1762,9 +1763,8 @@
/*
* Calculate expected memory requirements for spilling, which is the size
* of the buffers needed for all the tapes that need to be open at
* once. Then, subtract that from the memory available for holding hash
* tables.
* of the buffers needed for all the tapes that need to be open at once.
* Then, subtract that from the memory available for holding hash tables.
*/
npartitions = hash_choose_num_partitions(input_groups,
hashentrysize,
@@ -1803,11 +1803,11 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
static void
hash_agg_check_limits(AggState *aggstate)
{
uint64 ngroups = aggstate->hash_ngroups_current;
Size meta_mem = MemoryContextMemAllocated(
aggstate->hash_metacxt, true);
Size hash_mem = MemoryContextMemAllocated(
aggstate->hashcontext->ecxt_per_tuple_memory, true);
uint64 ngroups = aggstate->hash_ngroups_current;
Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
true);
Size hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
true);
/*
* Don't spill unless there's at least one group in the hash table so we
@@ -1841,13 +1841,12 @@ hash_agg_enter_spill_mode(AggState *aggstate)
hashagg_tapeinfo_init(aggstate);
aggstate->hash_spills = palloc(
sizeof(HashAggSpill) * aggstate->num_hashes);
aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
for (int setno = 0; setno < aggstate->num_hashes; setno++)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
HashAggSpill *spill = &aggstate->hash_spills[setno];
AggStatePerHash perhash = &aggstate->perhash[setno];
HashAggSpill *spill = &aggstate->hash_spills[setno];
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
perhash->aggnode->numGroups,
@@ -1865,10 +1864,10 @@ hash_agg_enter_spill_mode(AggState *aggstate)
static void
hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
Size meta_mem;
Size hash_mem;
Size buffer_mem;
Size total_mem;
Size meta_mem;
Size hash_mem;
Size buffer_mem;
Size total_mem;
if (aggstate->aggstrategy != AGG_MIXED &&
aggstate->aggstrategy != AGG_HASHED)
@@ -1878,8 +1877,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
/* memory for the group keys and transition states */
hash_mem = MemoryContextMemAllocated(
aggstate->hashcontext->ecxt_per_tuple_memory, true);
hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
/* memory for read/write tape buffers, if spilled */
buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
@@ -1894,8 +1892,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
/* update disk usage */
if (aggstate->hash_tapeinfo != NULL)
{
uint64 disk_used = LogicalTapeSetBlocks(
aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
if (aggstate->hash_disk_used < disk_used)
aggstate->hash_disk_used = disk_used;
@@ -1906,7 +1903,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
aggstate->hashentrysize =
sizeof(TupleHashEntryData) +
(hash_mem / (double)aggstate->hash_ngroups_current);
(hash_mem / (double) aggstate->hash_ngroups_current);
}
}
@@ -1916,8 +1913,8 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
static long
hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
{
long max_nbuckets;
long nbuckets = ngroups;
long max_nbuckets;
long nbuckets = ngroups;
max_nbuckets = memory / hashentrysize;
@@ -1943,10 +1940,10 @@ static int
hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
int used_bits, int *log2_npartitions)
{
Size mem_wanted;
int partition_limit;
int npartitions;
int partition_bits;
Size mem_wanted;
int partition_limit;
int npartitions;
int partition_bits;
/*
* Avoid creating so many partitions that the memory requirements of the
@@ -2005,8 +2002,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table)
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
TupleHashEntryData *entry;
bool isnew = false;
bool *p_isnew;
bool isnew = false;
bool *p_isnew;
/* if hash table already spilled, don't create new entries */
p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
@@ -2025,8 +2022,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table)
if (isnew)
{
AggStatePerGroup pergroup;
int transno;
AggStatePerGroup pergroup;
int transno;
aggstate->hash_ngroups_current++;
hash_agg_check_limits(aggstate);
@@ -2083,9 +2080,9 @@ lookup_hash_entries(AggState *aggstate)
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
uint32 hash;
bool in_hash_table;
AggStatePerHash perhash = &aggstate->perhash[setno];
uint32 hash;
bool in_hash_table;
select_current_set(aggstate, setno, true);
prepare_hash_slot(aggstate);
@@ -2095,8 +2092,8 @@ lookup_hash_entries(AggState *aggstate)
/* check to see if we need to spill the tuple for this grouping set */
if (!in_hash_table)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
HashAggSpill *spill = &aggstate->hash_spills[setno];
TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
if (spill->partitions == NULL)
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
@@ -2560,11 +2557,11 @@ agg_fill_hash_table(AggState *aggstate)
static bool
agg_refill_hash_table(AggState *aggstate)
{
HashAggBatch *batch;
HashAggSpill spill;
HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
uint64 ngroups_estimate;
bool spill_initialized = false;
HashAggBatch *batch;
HashAggSpill spill;
HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
uint64 ngroups_estimate;
bool spill_initialized = false;
if (aggstate->hash_batches == NIL)
return false;
@@ -2623,11 +2620,12 @@ agg_refill_hash_table(AggState *aggstate)
LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum,
HASHAGG_READ_BUFFER_SIZE);
for (;;) {
TupleTableSlot *slot = aggstate->hash_spill_slot;
MinimalTuple tuple;
uint32 hash;
bool in_hash_table;
for (;;)
{
TupleTableSlot *slot = aggstate->hash_spill_slot;
MinimalTuple tuple;
uint32 hash;
bool in_hash_table;
CHECK_FOR_INTERRUPTS();
@@ -2639,8 +2637,8 @@ agg_refill_hash_table(AggState *aggstate)
aggstate->tmpcontext->ecxt_outertuple = slot;
prepare_hash_slot(aggstate);
aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(
aggstate, hash, &in_hash_table);
aggstate->hash_pergroup[batch->setno] =
lookup_hash_entry(aggstate, hash, &in_hash_table);
if (in_hash_table)
{
@@ -2657,7 +2655,7 @@
*/
spill_initialized = true;
hashagg_spill_init(&spill, tapeinfo, batch->used_bits,
ngroups_estimate, aggstate->hashentrysize);
ngroups_estimate, aggstate->hashentrysize);
}
/* no memory for a new group, spill */
hashagg_spill_tuple(&spill, slot, hash);
@ -2851,8 +2849,8 @@ agg_retrieve_hash_table_in_memory(AggState *aggstate)
static void
hashagg_tapeinfo_init(AggState *aggstate)
{
HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
int init_tapes = 16; /* expanded dynamically */
HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
int init_tapes = 16; /* expanded dynamically */
tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, NULL, NULL, -1);
tapeinfo->ntapes = init_tapes;
@ -2873,7 +2871,7 @@ static void
hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions,
int npartitions)
{
int partidx = 0;
int partidx = 0;
/* use free tapes if available */
while (partidx < npartitions && tapeinfo->nfreetapes > 0)
@ -2899,8 +2897,8 @@ hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum)
if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes)
{
tapeinfo->freetapes_alloc <<= 1;
tapeinfo->freetapes = repalloc(
tapeinfo->freetapes, tapeinfo->freetapes_alloc * sizeof(int));
tapeinfo->freetapes = repalloc(tapeinfo->freetapes,
tapeinfo->freetapes_alloc * sizeof(int));
}
tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
}
@ -2915,11 +2913,11 @@ static void
hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
uint64 input_groups, double hashentrysize)
{
int npartitions;
int partition_bits;
int npartitions;
int partition_bits;
npartitions = hash_choose_num_partitions(
input_groups, hashentrysize, used_bits, &partition_bits);
npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
used_bits, &partition_bits);
spill->partitions = palloc0(sizeof(int) * npartitions);
spill->ntuples = palloc0(sizeof(int64) * npartitions);
@ -2941,12 +2939,12 @@ hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
static Size
hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot, uint32 hash)
{
LogicalTapeSet *tapeset = spill->tapeset;
int partition;
MinimalTuple tuple;
int tapenum;
int total_written = 0;
bool shouldFree;
LogicalTapeSet *tapeset = spill->tapeset;
int partition;
MinimalTuple tuple;
int tapenum;
int total_written = 0;
bool shouldFree;
Assert(spill->partitions != NULL);
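
For orientation: the spill machinery routes each tuple by its 32-bit hash, with every spill pass consuming the next partition_bits of the hash just below the used_bits already taken by earlier passes — which is why hashagg_spill_finish() below recovers used_bits as 32 - spill->shift. A simplified sketch of that selection, not the exact nodeAgg.c code (the typedef stands in for PostgreSQL's c.h):

#include <stdint.h>

typedef uint32_t uint32;	/* stand-in for the c.h typedef */

static inline int
choose_spill_partition(uint32 hash, int used_bits, int partition_bits)
{
	int		shift = 32 - used_bits - partition_bits;

	if (partition_bits == 0)
		return 0;			/* everything goes to one partition */

	/* npartitions is a power of two, so masking selects the bit field */
	return (int) ((hash >> shift) & ((1u << partition_bits) - 1));
}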
@ -2999,11 +2997,11 @@ static MinimalTuple
hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
{
LogicalTapeSet *tapeset = batch->tapeset;
int tapenum = batch->input_tapenum;
MinimalTuple tuple;
uint32 t_len;
size_t nread;
uint32 hash;
int tapenum = batch->input_tapenum;
MinimalTuple tuple;
uint32 t_len;
size_t nread;
uint32 hash;
nread = LogicalTapeRead(tapeset, tapenum, &hash, sizeof(uint32));
if (nread == 0)
@ -3027,7 +3025,7 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
tuple->t_len = t_len;
nread = LogicalTapeRead(tapeset, tapenum,
(void *)((char *)tuple + sizeof(uint32)),
(void *) ((char *) tuple + sizeof(uint32)),
t_len - sizeof(uint32));
if (nread != t_len - sizeof(uint32))
ereport(ERROR,
@ -3048,14 +3046,15 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
static void
hashagg_finish_initial_spills(AggState *aggstate)
{
int setno;
int total_npartitions = 0;
int setno;
int total_npartitions = 0;
if (aggstate->hash_spills != NULL)
{
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
total_npartitions += spill->npartitions;
hashagg_spill_finish(aggstate, spill, setno);
}
@ -3081,16 +3080,16 @@ hashagg_finish_initial_spills(AggState *aggstate)
static void
hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
{
int i;
int used_bits = 32 - spill->shift;
int i;
int used_bits = 32 - spill->shift;
if (spill->npartitions == 0)
return; /* didn't spill */
return; /* didn't spill */
for (i = 0; i < spill->npartitions; i++)
{
int tapenum = spill->partitions[i];
HashAggBatch *new_batch;
int tapenum = spill->partitions[i];
HashAggBatch *new_batch;
/* if the partition is empty, don't create a new batch of work */
if (spill->ntuples[i] == 0)
@ -3113,16 +3112,17 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
static void
hashagg_reset_spill_state(AggState *aggstate)
{
ListCell *lc;
ListCell *lc;
/* free spills from initial pass */
if (aggstate->hash_spills != NULL)
{
int setno;
int setno;
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
pfree(spill->ntuples);
pfree(spill->partitions);
}
@ -3133,7 +3133,8 @@ hashagg_reset_spill_state(AggState *aggstate)
/* free batches */
foreach(lc, aggstate->hash_batches)
{
HashAggBatch *batch = (HashAggBatch*) lfirst(lc);
HashAggBatch *batch = (HashAggBatch *) lfirst(lc);
pfree(batch);
}
list_free(aggstate->hash_batches);
@ -3142,7 +3143,7 @@ hashagg_reset_spill_state(AggState *aggstate)
/* close tape set */
if (aggstate->hash_tapeinfo != NULL)
{
HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
LogicalTapeSetClose(tapeinfo->tapeset);
pfree(tapeinfo->freetapes);
@ -3558,22 +3559,22 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
if (use_hashing)
{
Plan *outerplan = outerPlan(node);
uint64 totalGroups = 0;
int i;
Plan *outerplan = outerPlan(node);
uint64 totalGroups = 0;
int i;
aggstate->hash_metacxt = AllocSetContextCreate(
aggstate->ss.ps.state->es_query_cxt,
"HashAgg meta context",
ALLOCSET_DEFAULT_SIZES);
aggstate->hash_spill_slot = ExecInitExtraTupleSlot(
estate, scanDesc, &TTSOpsMinimalTuple);
aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
"HashAgg meta context",
ALLOCSET_DEFAULT_SIZES);
aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc,
&TTSOpsMinimalTuple);
/* this is an array of pointers, not structures */
aggstate->hash_pergroup = pergroups;
aggstate->hashentrysize = hash_agg_entry_size(
aggstate->numtrans, outerplan->plan_width, node->transitionSpace);
aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
outerplan->plan_width,
node->transitionSpace);
/*
* Consider all of the grouping sets together when setting the limits

View File

@ -791,8 +791,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate);
/*
* Maximum number of prefetches for the tablespace if configured, otherwise
* the current value of the effective_io_concurrency GUC.
* Maximum number of prefetches for the tablespace if configured,
* otherwise the current value of the effective_io_concurrency GUC.
*/
scanstate->prefetch_maximum =
get_tablespace_io_concurrency(currentRelation->rd_rel->reltablespace);

View File

@ -97,17 +97,24 @@
* - groupName: the token fullsort or prefixsort
*/
#define INSTRUMENT_SORT_GROUP(node, groupName) \
if (node->ss.ps.instrument != NULL) \
{ \
if (node->shared_info && node->am_worker) \
do { \
if ((node)->ss.ps.instrument != NULL) \
{ \
Assert(IsParallelWorker()); \
Assert(ParallelWorkerNumber <= node->shared_info->num_workers); \
instrumentSortedGroup(&node->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, node->groupName##_state); \
} else { \
instrumentSortedGroup(&node->incsort_info.groupName##GroupInfo, node->groupName##_state); \
if ((node)->shared_info && (node)->am_worker) \
{ \
Assert(IsParallelWorker()); \
Assert(ParallelWorkerNumber <= (node)->shared_info->num_workers); \
instrumentSortedGroup(&(node)->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, \
(node)->groupName##_state); \
} \
else \
{ \
instrumentSortedGroup(&(node)->incsort_info.groupName##GroupInfo, \
(node)->groupName##_state); \
} \
} \
}
} while (0)
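
The rewritten macro wraps its body in do { ... } while (0), the standard idiom for multi-statement macros; it is also why the INSTRUMENT_SORT_GROUP call sites later in this file gain a trailing semicolon. A minimal standalone illustration of the hazard being avoided (all names here are invented):

/* demo.c -- illustrative only; BAD_LOG and GOOD_LOG are made-up macros */
#include <stdio.h>

#define BAD_LOG(x)	{ printf("%d\n", (x)); fflush(stdout); }
#define GOOD_LOG(x) do { printf("%d\n", (x)); fflush(stdout); } while (0)

int
main(void)
{
	int		cond = 1;

#ifdef SHOW_THE_BUG
	/*
	 * Does not compile: BAD_LOG expands to "{ ... } ;" and the stray
	 * semicolon terminates the if, leaving the else unmatched.
	 */
	if (cond)
		BAD_LOG(1);
	else
		printf("other\n");
#endif

	/* Expands to a single statement, so if/else nests as expected. */
	if (cond)
		GOOD_LOG(1);
	else
		printf("other\n");

	return 0;
}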
/* ----------------------------------------------------------------
* instrumentSortedGroup
@ -122,6 +129,7 @@ instrumentSortedGroup(IncrementalSortGroupInfo *groupInfo,
Tuplesortstate *sortState)
{
TuplesortInstrumentation sort_instr;
groupInfo->groupCount++;
tuplesort_get_stats(sortState, &sort_instr);
@ -444,7 +452,7 @@ switchToPresortedPrefixMode(PlanState *pstate)
SO1_printf("Sorting presorted prefix tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
INSTRUMENT_SORT_GROUP(node, prefixsort)
INSTRUMENT_SORT_GROUP(node, prefixsort);
if (node->bounded)
{
@ -702,7 +710,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting fullsort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
INSTRUMENT_SORT_GROUP(node, fullsort)
INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (final tuple)\n");
node->execution_status = INCSORT_READFULLSORT;
@ -783,7 +791,7 @@ ExecIncrementalSort(PlanState *pstate)
nTuples);
tuplesort_performsort(fullsort_state);
INSTRUMENT_SORT_GROUP(node, fullsort)
INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (found end of group)\n");
node->execution_status = INCSORT_READFULLSORT;
@ -792,8 +800,8 @@ ExecIncrementalSort(PlanState *pstate)
}
/*
* Unless we've already transitioned modes to reading from the full
* sort state, then we assume that having read at least
* Unless we've already transitioned modes to reading from the
* full sort state, then we assume that having read at least
* DEFAULT_MAX_FULL_SORT_GROUP_SIZE tuples means it's likely we're
* processing a large group of tuples all having equal prefix keys
* (but haven't yet found the final tuple in that prefix key
@ -823,7 +831,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting fullsort tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
INSTRUMENT_SORT_GROUP(node, fullsort)
INSTRUMENT_SORT_GROUP(node, fullsort);
/*
* If the full sort tuplesort happened to switch into top-n
@ -849,8 +857,9 @@ ExecIncrementalSort(PlanState *pstate)
/*
* We might have multiple prefix key groups in the full sort
* state, so the mode transition function needs to know that it
* needs to move from the fullsort to presorted prefix sort.
* state, so the mode transition function needs to know that
* it needs to move from the fullsort to presorted prefix
* sort.
*/
node->n_fullsort_remaining = nTuples;
@ -936,7 +945,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting presorted prefix tuplesort with >= %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
INSTRUMENT_SORT_GROUP(node, prefixsort)
INSTRUMENT_SORT_GROUP(node, prefixsort);
SO_printf("Setting execution_status to INCSORT_READPREFIXSORT (found end of group)\n");
node->execution_status = INCSORT_READPREFIXSORT;
@ -986,9 +995,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
SO_printf("ExecInitIncrementalSort: initializing sort node\n");
/*
* Incremental sort can't be used with EXEC_FLAG_BACKWARD or EXEC_FLAG_MARK,
* because the current sort state contains only one sort batch rather than
* the full result set.
* Incremental sort can't be used with EXEC_FLAG_BACKWARD or
* EXEC_FLAG_MARK, because the current sort state contains only one sort
* batch rather than the full result set.
*/
Assert((eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)) == 0);
@ -1041,8 +1050,8 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
* Initialize child nodes.
*
* Incremental sort does not support backwards scans and mark/restore, so
* we don't bother removing the flags from eflags here. We allow passing
* a REWIND flag, because although incremental sort can't use it, the child
* we don't bother removing the flags from eflags here. We allow passing a
* REWIND flag, because although incremental sort can't use it, the child
* nodes may be able to do something more useful.
*/
outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags);
@ -1128,10 +1137,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
* re-execute the sort along with the child node. Incremental sort itself
* can't do anything smarter, but maybe the child nodes can.
*
* In theory if we've only filled the full sort with one batch (and haven't
* reset it for a new batch yet) then we could efficiently rewind, but
* that seems a narrow enough case that it's not worth handling specially
* at this time.
* In theory if we've only filled the full sort with one batch (and
* haven't reset it for a new batch yet) then we could efficiently rewind,
* but that seems a narrow enough case that it's not worth handling
* specially at this time.
*/
/* must drop pointer to sort result tuple */
@ -1152,10 +1161,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
/*
* If we've set up either of the sort states yet, we need to reset them.
* We could end them and null out the pointers, but there's no reason to
* repay the setup cost, and because ExecIncrementalSort guards
* presorted column functions by checking to see if the full sort state
* has been initialized yet, setting the sort states to null here might
* actually cause a leak.
* repay the setup cost, and because ExecIncrementalSort guards presorted
* column functions by checking to see if the full sort state has been
* initialized yet, setting the sort states to null here might actually
* cause a leak.
*/
if (node->fullsort_state != NULL)
{

View File

@ -144,7 +144,7 @@ TidListEval(TidScanState *tidstate)
if (tidstate->ss.ss_currentScanDesc == NULL)
tidstate->ss.ss_currentScanDesc =
table_beginscan_tid(tidstate->ss.ss_currentRelation,
tidstate->ss.ps.state->es_snapshot);
tidstate->ss.ps.state->es_snapshot);
scan = tidstate->ss.ss_currentScanDesc;
/*

View File

@ -2048,11 +2048,11 @@ llvm_compile_expr(ExprState *state)
case EEOP_AGG_PLAIN_PERGROUP_NULLCHECK:
{
int jumpnull;
LLVMValueRef v_aggstatep;
LLVMValueRef v_allpergroupsp;
LLVMValueRef v_pergroup_allaggs;
LLVMValueRef v_setoff;
int jumpnull;
LLVMValueRef v_aggstatep;
LLVMValueRef v_allpergroupsp;
LLVMValueRef v_pergroup_allaggs;
LLVMValueRef v_setoff;
jumpnull = op->d.agg_plain_pergroup_nullcheck.jumpnull;
@ -2060,28 +2060,23 @@ llvm_compile_expr(ExprState *state)
* pergroup_allaggs = aggstate->all_pergroups
* [op->d.agg_plain_pergroup_nullcheck.setoff];
*/
v_aggstatep = LLVMBuildBitCast(
b, v_parent, l_ptr(StructAggState), "");
v_aggstatep = LLVMBuildBitCast(b, v_parent,
l_ptr(StructAggState), "");
v_allpergroupsp = l_load_struct_gep(
b, v_aggstatep,
FIELDNO_AGGSTATE_ALL_PERGROUPS,
"aggstate.all_pergroups");
v_allpergroupsp = l_load_struct_gep(b, v_aggstatep,
FIELDNO_AGGSTATE_ALL_PERGROUPS,
"aggstate.all_pergroups");
v_setoff = l_int32_const(
op->d.agg_plain_pergroup_nullcheck.setoff);
v_setoff = l_int32_const(op->d.agg_plain_pergroup_nullcheck.setoff);
v_pergroup_allaggs = l_load_gep1(
b, v_allpergroupsp, v_setoff, "");
v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, "");
LLVMBuildCondBr(
b,
LLVMBuildICmp(b, LLVMIntEQ,
LLVMBuildPtrToInt(
b, v_pergroup_allaggs, TypeSizeT, ""),
l_sizet_const(0), ""),
opblocks[jumpnull],
opblocks[opno + 1]);
LLVMBuildCondBr(b,
LLVMBuildICmp(b, LLVMIntEQ,
LLVMBuildPtrToInt(b, v_pergroup_allaggs, TypeSizeT, ""),
l_sizet_const(0), ""),
opblocks[jumpnull],
opblocks[opno + 1]);
break;
}
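
Paraphrased in C, the IR assembled in this case implements the null check like so (illustrative only; this is not code from the patch):

/* pergroup_allaggs = aggstate->all_pergroups[setoff]; branch if NULL */
pergroup_allaggs = aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
	goto null_check_failed;		/* i.e., jump to opblocks[jumpnull] */
/* otherwise fall through to opblocks[opno + 1] */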

View File

@ -162,7 +162,7 @@ static char *build_server_final_message(scram_state *state);
static bool verify_client_proof(scram_state *state);
static bool verify_final_nonce(scram_state *state);
static void mock_scram_secret(const char *username, int *iterations,
char **salt, uint8 *stored_key, uint8 *server_key);
char **salt, uint8 *stored_key, uint8 *server_key);
static bool is_scram_printable(char *p);
static char *sanitize_char(char c);
static char *sanitize_str(const char *s);
@ -257,7 +257,7 @@ pg_be_scram_init(Port *port,
if (password_type == PASSWORD_TYPE_SCRAM_SHA_256)
{
if (parse_scram_secret(shadow_pass, &state->iterations, &state->salt,
state->StoredKey, state->ServerKey))
state->StoredKey, state->ServerKey))
got_secret = true;
else
{
@ -293,15 +293,15 @@ pg_be_scram_init(Port *port,
}
/*
* If the user did not have a valid SCRAM secret, we still go through
* the motions with a mock one, and fail as if the client supplied an
* If the user did not have a valid SCRAM secret, we still go through the
* motions with a mock one, and fail as if the client supplied an
* incorrect password. This is to avoid revealing information to an
* attacker.
*/
if (!got_secret)
{
mock_scram_secret(state->port->user_name, &state->iterations,
&state->salt, state->StoredKey, state->ServerKey);
&state->salt, state->StoredKey, state->ServerKey);
state->doomed = true;
}
@ -471,7 +471,7 @@ pg_be_scram_build_secret(const char *password)
errmsg("could not generate random salt")));
result = scram_build_secret(saltbuf, SCRAM_DEFAULT_SALT_LEN,
SCRAM_DEFAULT_ITERATIONS, password);
SCRAM_DEFAULT_ITERATIONS, password);
if (prep_password)
pfree(prep_password);
@ -500,7 +500,7 @@ scram_verify_plain_password(const char *username, const char *password,
pg_saslprep_rc rc;
if (!parse_scram_secret(secret, &iterations, &encoded_salt,
stored_key, server_key))
stored_key, server_key))
{
/*
* The password looked like a SCRAM secret, but could not be parsed.
@ -554,7 +554,7 @@ scram_verify_plain_password(const char *username, const char *password,
*/
bool
parse_scram_secret(const char *secret, int *iterations, char **salt,
uint8 *stored_key, uint8 *server_key)
uint8 *stored_key, uint8 *server_key)
{
char *v;
char *p;
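
For reference, the on-disk shape that parse_scram_secret() picks apart is, per the PostgreSQL documentation, SCRAM-SHA-256$<iterations>:<salt>$<StoredKey>:<ServerKey>, with the salt and both keys base64-encoded. A sketch (the salt value is fabricated, and the key fields are left as placeholders rather than invented):

static const char *example_scram_secret =
	"SCRAM-SHA-256$4096:cmFuZG9tc2FsdA==$<StoredKey-b64>:<ServerKey-b64>";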
@ -645,7 +645,7 @@ invalid_secret:
*/
static void
mock_scram_secret(const char *username, int *iterations, char **salt,
uint8 *stored_key, uint8 *server_key)
uint8 *stored_key, uint8 *server_key)
{
char *raw_salt;
char *encoded_salt;

View File

@ -46,7 +46,7 @@
#include "utils/memutils.h"
/* default init hook can be overridden by a shared library */
static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart);
static void default_openssl_tls_init(SSL_CTX *context, bool isServerStart);
openssl_tls_init_hook_typ openssl_tls_init_hook = default_openssl_tls_init;
static int my_sock_read(BIO *h, char *buf, int size);
@ -122,7 +122,7 @@ be_tls_init(bool isServerStart)
/*
* Call init hook (usually to set password callback)
*/
(* openssl_tls_init_hook)(context, isServerStart);
(*openssl_tls_init_hook) (context, isServerStart);
/* used by the callback */
ssl_is_server_start = isServerStart;
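
The invocation above is the extension point described at the top of this file ("default init hook can be overridden by a shared library"). A hypothetical preload-library sketch of such an override; only openssl_tls_init_hook, its typedef, and _PG_init are real interfaces, the header choices are from memory, and everything else is invented for illustration:

/* my_tls_init.c -- hypothetical shared_preload_libraries module */
#include "postgres.h"

#include <openssl/ssl.h>

#include "libpq/libpq.h"
#include "libpq/libpq-be.h"

PG_MODULE_MAGIC;

static openssl_tls_init_hook_typ prev_tls_init_hook = NULL;

static void
my_tls_init(SSL_CTX *context, bool isServerStart)
{
	/* chain to the previous hook (a policy invented for this sketch) */
	if (prev_tls_init_hook)
		(*prev_tls_init_hook) (context, isServerStart);

	/* e.g., install a custom passphrase callback on "context" here */
}

void
_PG_init(void)
{
	prev_tls_init_hook = openssl_tls_init_hook;
	openssl_tls_init_hook = my_tls_init;
}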
@ -1341,6 +1341,7 @@ default_openssl_tls_init(SSL_CTX *context, bool isServerStart)
if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload)
SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb);
else
/*
* If reloading and no external command is configured, override
* OpenSSL's default handling of passphrase-protected files,

View File

@ -98,7 +98,7 @@ get_password_type(const char *shadow_pass)
strspn(shadow_pass + 3, MD5_PASSWD_CHARSET) == MD5_PASSWD_LEN - 3)
return PASSWORD_TYPE_MD5;
if (parse_scram_secret(shadow_pass, &iterations, &encoded_salt,
stored_key, server_key))
stored_key, server_key))
return PASSWORD_TYPE_SCRAM_SHA_256;
return PASSWORD_TYPE_PLAINTEXT;
}

View File

@ -2751,13 +2751,14 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
List *useful_pathkeys_list = NIL;
/*
* Considering query_pathkeys is always worth it, because it might allow us
* to avoid a total sort when we have a partially presorted path available.
* Considering query_pathkeys is always worth it, because it might allow
* us to avoid a total sort when we have a partially presorted path
* available.
*/
if (root->query_pathkeys)
{
ListCell *lc;
int npathkeys = 0; /* useful pathkeys */
int npathkeys = 0; /* useful pathkeys */
foreach(lc, root->query_pathkeys)
{
@ -2765,15 +2766,15 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
/*
* We can only build an Incremental Sort for pathkeys which contain
* an EC member in the current relation, so ignore any suffix of the
* list as soon as we find a pathkey without an EC member the
* relation.
* We can only build an Incremental Sort for pathkeys which
* contain an EC member in the current relation, so ignore any
* suffix of the list as soon as we find a pathkey without an EC
* member the relation.
*
* By still returning the prefix of the pathkeys list that does meet
* criteria of EC membership in the current relation, we enable not
* just an incremental sort on the entirety of query_pathkeys but
* also incremental sort below a JOIN.
* By still returning the prefix of the pathkeys list that does
* meet criteria of EC membership in the current relation, we
* enable not just an incremental sort on the entirety of
* query_pathkeys but also incremental sort below a JOIN.
*/
if (!find_em_expr_for_rel(pathkey_ec, rel))
break;
@ -2782,9 +2783,9 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
}
/*
* The whole query_pathkeys list matches, so append it directly, to allow
* comparing pathkeys easily by comparing list pointer. If we have to truncate
* the pathkeys, we gotta do a copy though.
* The whole query_pathkeys list matches, so append it directly, to
* allow comparing pathkeys easily by comparing list pointer. If we
* have to truncate the pathkeys, we gotta do a copy though.
*/
if (npathkeys == list_length(root->query_pathkeys))
useful_pathkeys_list = lappend(useful_pathkeys_list,
@ -2851,14 +2852,15 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
/*
* If the path has no ordering at all, then we can't use either
* incremental sort or rely on implict sorting with a gather merge.
* incremental sort or rely on implict sorting with a gather
* merge.
*/
if (subpath->pathkeys == NIL)
continue;
is_sorted = pathkeys_count_contained_in(useful_pathkeys,
subpath->pathkeys,
&presorted_keys);
subpath->pathkeys,
&presorted_keys);
/*
* We don't need to consider the case where a subpath is already
@ -2915,8 +2917,9 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
Path *tmp;
/*
* We should have already excluded pathkeys of length 1 because
* then presorted_keys > 0 would imply is_sorted was true.
* We should have already excluded pathkeys of length 1
* because then presorted_keys > 0 would imply is_sorted was
* true.
*/
Assert(list_length(useful_pathkeys) != 1);

View File

@ -1821,19 +1821,19 @@ cost_incremental_sort(Path *path,
/*
* Extract presorted keys as list of expressions.
*
* We need to be careful about Vars containing "varno 0" which might
* have been introduced by generate_append_tlist, which would confuse
* We need to be careful about Vars containing "varno 0" which might have
* been introduced by generate_append_tlist, which would confuse
* estimate_num_groups (in fact it'd fail for such expressions). See
* recurse_set_operations which has to deal with the same issue.
*
* Unlike recurse_set_operations we can't access the original target
* list here, and even if we could it's not very clear how useful would
* that be for a set operation combining multiple tables. So we simply
* detect if there are any expressions with "varno 0" and use the
* default DEFAULT_NUM_DISTINCT in that case.
* Unlike recurse_set_operations we can't access the original target list
* here, and even if we could it's not very clear how useful would that be
* for a set operation combining multiple tables. So we simply detect if
* there are any expressions with "varno 0" and use the default
* DEFAULT_NUM_DISTINCT in that case.
*
* We might also use either 1.0 (a single group) or input_tuples (each
* row being a separate group), pretty much the worst and best case for
* We might also use either 1.0 (a single group) or input_tuples (each row
* being a separate group), pretty much the worst and best case for
* incremental sort. But those are extreme cases and using something in
* between seems reasonable. Furthermore, generate_append_tlist is used
* for set operations, which are likely to produce mostly unique output
@ -2403,40 +2403,40 @@ cost_agg(Path *path, PlannerInfo *root,
/*
* Add the disk costs of hash aggregation that spills to disk.
*
* Groups that go into the hash table stay in memory until finalized,
* so spilling and reprocessing tuples doesn't incur additional
* invocations of transCost or finalCost. Furthermore, the computed
* hash value is stored with the spilled tuples, so we don't incur
* extra invocations of the hash function.
* Groups that go into the hash table stay in memory until finalized, so
* spilling and reprocessing tuples doesn't incur additional invocations
* of transCost or finalCost. Furthermore, the computed hash value is
* stored with the spilled tuples, so we don't incur extra invocations of
* the hash function.
*
* Hash Agg begins returning tuples after the first batch is
* complete. Accrue writes (spilled tuples) to startup_cost and to
* total_cost; accrue reads only to total_cost.
* Hash Agg begins returning tuples after the first batch is complete.
* Accrue writes (spilled tuples) to startup_cost and to total_cost;
* accrue reads only to total_cost.
*/
if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
{
double pages;
double pages_written = 0.0;
double pages_read = 0.0;
double hashentrysize;
double nbatches;
Size mem_limit;
uint64 ngroups_limit;
int num_partitions;
int depth;
double pages;
double pages_written = 0.0;
double pages_read = 0.0;
double hashentrysize;
double nbatches;
Size mem_limit;
uint64 ngroups_limit;
int num_partitions;
int depth;
/*
* Estimate number of batches based on the computed limits. If less
* than or equal to one, all groups are expected to fit in memory;
* otherwise we expect to spill.
*/
hashentrysize = hash_agg_entry_size(
aggcosts->numAggs, input_width, aggcosts->transitionSpace);
hashentrysize = hash_agg_entry_size(aggcosts->numAggs, input_width,
aggcosts->transitionSpace);
hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
&ngroups_limit, &num_partitions);
nbatches = Max( (numGroups * hashentrysize) / mem_limit,
numGroups / ngroups_limit );
nbatches = Max((numGroups * hashentrysize) / mem_limit,
numGroups / ngroups_limit);
nbatches = Max(ceil(nbatches), 1.0);
num_partitions = Max(num_partitions, 2);
@ -2446,7 +2446,7 @@ cost_agg(Path *path, PlannerInfo *root,
* recursion; but for the purposes of this calculation assume it stays
* constant.
*/
depth = ceil( log(nbatches) / log(num_partitions) );
depth = ceil(log(nbatches) / log(num_partitions));
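
These lines estimate how many times spilled data is rewritten: nbatches batches arranged in a num_partitions-way recursion tree give ceil(log(nbatches) / log(num_partitions)) levels. A worked example with invented numbers, ignoring the ngroups_limit term of the real formula for brevity:

/* illustrative arithmetic only; all inputs are made up */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double	numGroups = 1000000.0;			/* estimated distinct groups */
	double	hashentrysize = 80.0;			/* bytes per in-memory entry */
	double	mem_limit = 4.0 * 1024 * 1024;	/* 4MB hash memory limit */
	int		num_partitions = 4;

	double	nbatches = ceil(numGroups * hashentrysize / mem_limit); /* 20 */
	int		depth = (int) ceil(log(nbatches) / log(num_partitions)); /* 3 */

	printf("nbatches=%.0f depth=%d\n", nbatches, depth);
	return 0;
}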
/*
* Estimate number of pages read and written. For each level of

View File

@ -1378,8 +1378,8 @@ try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
Assert(joinrel->consider_partitionwise_join);
/*
* We can not perform partitionwise join if either of the joining relations
* is not partitioned.
* We can not perform partitionwise join if either of the joining
* relations is not partitioned.
*/
if (!IS_PARTITIONED_REL(rel1) || !IS_PARTITIONED_REL(rel2))
return;
@ -1622,8 +1622,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
* partition bounds as inputs, and the partitions with the same
* cardinal positions form the pairs.
*
* Note: even in cases where one or both inputs have merged bounds,
* it would be possible for both the bounds to be exactly the same, but
* Note: even in cases where one or both inputs have merged bounds, it
* would be possible for both the bounds to be exactly the same, but
* it seems unlikely to be worth the cycles to check.
*/
if (!rel1->partbounds_merged &&
@ -1670,8 +1670,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
/*
* If the join rel's partbounds_merged flag is true, it means inputs
* are not guaranteed to have the same partition bounds, therefore we
* can't assume that the partitions at the same cardinal positions form
* the pairs; let get_matching_part_pairs() generate the pairs.
* can't assume that the partitions at the same cardinal positions
* form the pairs; let get_matching_part_pairs() generate the pairs.
* Otherwise, nothing to do since we can assume that.
*/
if (joinrel->partbounds_merged)
@ -1695,7 +1695,7 @@ get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel,
{
bool rel1_is_simple = IS_SIMPLE_REL(rel1);
bool rel2_is_simple = IS_SIMPLE_REL(rel2);
int cnt_parts;
int cnt_parts;
*parts1 = NIL;
*parts2 = NIL;
@ -1735,9 +1735,10 @@ get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel,
* Get a child rel for rel1 with the relids. Note that we should have
* the child rel even if rel1 is a join rel, because in that case the
* partitions specified in the relids would have matching/overlapping
* boundaries, so the specified partitions should be considered as ones
* to be joined when planning partitionwise joins of rel1, meaning that
* the child rel would have been built by the time we get here.
* boundaries, so the specified partitions should be considered as
* ones to be joined when planning partitionwise joins of rel1,
* meaning that the child rel would have been built by the time we get
* here.
*/
if (rel1_is_simple)
{

View File

@ -1857,7 +1857,7 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
return 0; /* unordered path */
(void) pathkeys_count_contained_in(root->query_pathkeys, pathkeys,
&n_common_pathkeys);
&n_common_pathkeys);
return n_common_pathkeys;
}

View File

@ -4866,8 +4866,7 @@ create_distinct_paths(PlannerInfo *root,
allow_hash = false; /* policy-based decision not to hash */
else
{
Size hashentrysize = hash_agg_entry_size(
0, cheapest_input_path->pathtarget->width, 0);
Size hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0);
allow_hash = enable_hashagg_disk ||
(hashentrysize * numDistinctRows <= work_mem * 1024L);
@ -4972,7 +4971,7 @@ create_ordered_paths(PlannerInfo *root,
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
input_path->pathkeys, &presorted_keys);
input_path->pathkeys, &presorted_keys);
if (is_sorted)
{
@ -4986,9 +4985,9 @@ create_ordered_paths(PlannerInfo *root,
else
{
/*
* Try adding an explicit sort, but only to the cheapest total path
* since a full sort should generally add the same cost to all
* paths.
* Try adding an explicit sort, but only to the cheapest total
* path since a full sort should generally add the same cost to
* all paths.
*/
if (input_path == cheapest_input_path)
{
@ -5010,11 +5009,11 @@ create_ordered_paths(PlannerInfo *root,
}
/*
* If incremental sort is enabled, then try it as well. Unlike with
* regular sorts, we can't just look at the cheapest path, because
* the cost of incremental sort depends on how well presorted the
* path is. Additionally incremental sort may enable a cheaper
* startup path to win out despite higher total cost.
* If incremental sort is enabled, then try it as well. Unlike
* with regular sorts, we can't just look at the cheapest path,
* because the cost of incremental sort depends on how well
* presorted the path is. Additionally incremental sort may enable
* a cheaper startup path to win out despite higher total cost.
*/
if (!enable_incrementalsort)
continue;
@ -5110,15 +5109,15 @@ create_ordered_paths(PlannerInfo *root,
double total_groups;
/*
* We don't care if this is the cheapest partial path - we can't
* simply skip it, because it may be partially sorted in which
* case we want to consider adding incremental sort (instead of
* full sort, which is what happens above).
* We don't care if this is the cheapest partial path - we
* can't simply skip it, because it may be partially sorted in
* which case we want to consider adding incremental sort
* (instead of full sort, which is what happens above).
*/
is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
input_path->pathkeys,
&presorted_keys);
input_path->pathkeys,
&presorted_keys);
/* No point in adding incremental sort on fully sorted paths. */
if (is_sorted)
@ -6510,8 +6509,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
path->pathkeys,
&presorted_keys);
path->pathkeys,
&presorted_keys);
if (path == cheapest_path || is_sorted)
{
@ -6607,8 +6606,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->hasAggs)
{
/*
* We have aggregation, possibly with plain GROUP BY. Make
* an AggPath.
* We have aggregation, possibly with plain GROUP BY. Make an
* AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
@ -6625,8 +6624,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->groupClause)
{
/*
* We have GROUP BY without aggregation or grouping sets.
* Make a GroupPath.
* We have GROUP BY without aggregation or grouping sets. Make
* a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
@ -6657,8 +6656,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
path->pathkeys,
&presorted_keys);
path->pathkeys,
&presorted_keys);
/*
* Insert a Sort node, if required. But there's no point in
@ -6712,8 +6711,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
continue;
/*
* We should have already excluded pathkeys of length 1 because
* then presorted_keys > 0 would imply is_sorted was true.
* We should have already excluded pathkeys of length 1
* because then presorted_keys > 0 would imply is_sorted was
* true.
*/
Assert(list_length(root->group_pathkeys) != 1);
@ -7032,8 +7032,8 @@ create_partial_grouping_paths(PlannerInfo *root,
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
path->pathkeys,
&presorted_keys);
path->pathkeys,
&presorted_keys);
/* Ignore already sorted paths */
if (is_sorted)
@ -7086,8 +7086,8 @@ create_partial_grouping_paths(PlannerInfo *root,
int presorted_keys;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
path->pathkeys,
&presorted_keys);
path->pathkeys,
&presorted_keys);
if (path == cheapest_partial_path || is_sorted)
{
@ -7301,8 +7301,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
* Consider incremental sort on all partial paths, if enabled.
*
* We can also skip the entire loop when we only have a single-item
* group_pathkeys because then we can't possibly have a presorted
* prefix of the list without having the list be fully sorted.
* group_pathkeys because then we can't possibly have a presorted prefix
* of the list without having the list be fully sorted.
*/
if (!enable_incrementalsort || list_length(root->group_pathkeys) == 1)
return;
@ -7316,8 +7316,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
double total_groups;
is_sorted = pathkeys_count_contained_in(root->group_pathkeys,
path->pathkeys,
&presorted_keys);
path->pathkeys,
&presorted_keys);
if (is_sorted)
continue;

View File

@ -1767,7 +1767,7 @@ transformLimitClause(ParseState *pstate, Node *clause,
* unadorned NULL that's not accepted back by the grammar.
*/
if (exprKind == EXPR_KIND_LIMIT && limitOption == LIMIT_OPTION_WITH_TIES &&
IsA(clause, A_Const) && ((A_Const *) clause)->val.type == T_Null)
IsA(clause, A_Const) &&((A_Const *) clause)->val.type == T_Null)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE),
errmsg("row count cannot be NULL in FETCH FIRST ... WITH TIES clause")));

View File

@ -990,7 +990,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
/*
* We must fill the attmap now so that it can be used to process generated
* column default expressions in the per-column loop below.
*/
*/
new_attno = 1;
for (parent_attno = 1; parent_attno <= tupleDesc->natts;
parent_attno++)
@ -2194,7 +2194,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
* mentioned above.
*/
Datum attoptions =
get_attoptions(RelationGetRelid(index_rel), i + 1);
get_attoptions(RelationGetRelid(index_rel), i + 1);
defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam);

View File

@ -76,7 +76,7 @@ typedef struct PartitionRangeBound
typedef struct PartitionMap
{
int nparts; /* number of partitions */
int *merged_indexes; /* indexes of merged partitions */
int *merged_indexes; /* indexes of merged partitions */
bool *merged; /* flags to indicate whether partitions are
* merged with non-dummy partitions */
bool did_remapping; /* did we re-map partitions? */
@ -120,29 +120,29 @@ static PartitionBoundInfo merge_range_bounds(int partnatts,
static void init_partition_map(RelOptInfo *rel, PartitionMap *map);
static void free_partition_map(PartitionMap *map);
static bool is_dummy_partition(RelOptInfo *rel, int part_index);
static int merge_matching_partitions(PartitionMap *outer_map,
PartitionMap *inner_map,
int outer_part,
int inner_part,
int *next_index);
static int process_outer_partition(PartitionMap *outer_map,
PartitionMap *inner_map,
bool outer_has_default,
bool inner_has_default,
int outer_index,
int inner_default,
JoinType jointype,
int *next_index,
int *default_index);
static int process_inner_partition(PartitionMap *outer_map,
PartitionMap *inner_map,
bool outer_has_default,
bool inner_has_default,
int inner_index,
int outer_default,
JoinType jointype,
int *next_index,
int *default_index);
static int merge_matching_partitions(PartitionMap *outer_map,
PartitionMap *inner_map,
int outer_part,
int inner_part,
int *next_index);
static int process_outer_partition(PartitionMap *outer_map,
PartitionMap *inner_map,
bool outer_has_default,
bool inner_has_default,
int outer_index,
int inner_default,
JoinType jointype,
int *next_index,
int *default_index);
static int process_inner_partition(PartitionMap *outer_map,
PartitionMap *inner_map,
bool outer_has_default,
bool inner_has_default,
int inner_index,
int outer_default,
JoinType jointype,
int *next_index,
int *default_index);
static void merge_null_partitions(PartitionMap *outer_map,
PartitionMap *inner_map,
bool outer_has_null,
@ -161,8 +161,8 @@ static void merge_default_partitions(PartitionMap *outer_map,
JoinType jointype,
int *next_index,
int *default_index);
static int merge_partition_with_dummy(PartitionMap *map, int index,
int *next_index);
static int merge_partition_with_dummy(PartitionMap *map, int index,
int *next_index);
static void fix_merged_indexes(PartitionMap *outer_map,
PartitionMap *inner_map,
int nmerged, List *merged_indexes);
@ -179,15 +179,15 @@ static PartitionBoundInfo build_merged_partition_bounds(char strategy,
List *merged_indexes,
int null_index,
int default_index);
static int get_range_partition(RelOptInfo *rel,
PartitionBoundInfo bi,
int *lb_pos,
PartitionRangeBound *lb,
PartitionRangeBound *ub);
static int get_range_partition_internal(PartitionBoundInfo bi,
int *lb_pos,
PartitionRangeBound *lb,
PartitionRangeBound *ub);
static int get_range_partition(RelOptInfo *rel,
PartitionBoundInfo bi,
int *lb_pos,
PartitionRangeBound *lb,
PartitionRangeBound *ub);
static int get_range_partition_internal(PartitionBoundInfo bi,
int *lb_pos,
PartitionRangeBound *lb,
PartitionRangeBound *ub);
static bool compare_range_partitions(int partnatts, FmgrInfo *partsupfuncs,
Oid *partcollations,
PartitionRangeBound *outer_lb,
@ -201,7 +201,7 @@ static void get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
PartitionRangeBound *outer_ub,
PartitionRangeBound *inner_lb,
PartitionRangeBound *inner_ub,
int lb_cmpval, int ub_cmpval,
int lb_cmpval, int ub_cmpval,
PartitionRangeBound *merged_lb,
PartitionRangeBound *merged_ub);
static void add_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
@ -955,8 +955,8 @@ partition_bounds_copy(PartitionBoundInfo src,
dest->kind = NULL;
/*
* For hash partitioning, datums array will have two elements - modulus and
* remainder.
* For hash partitioning, datums array will have two elements - modulus
* and remainder.
*/
hash_part = (key->strategy == PARTITION_STRATEGY_HASH);
natts = hash_part ? 2 : partnatts;
@ -1076,7 +1076,7 @@ partition_bounds_merge(int partnatts,
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) outer_binfo->strategy);
return NULL; /* keep compiler quiet */
return NULL; /* keep compiler quiet */
}
}
@ -1144,10 +1144,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
/*
* Merge partitions from both sides. In each iteration we compare a pair
* of list values, one from each side, and decide whether the corresponding
* partitions match or not. If the two values match exactly, move to the
* next pair of list values, otherwise move to the next list value on the
* side with a smaller list value.
* of list values, one from each side, and decide whether the
* corresponding partitions match or not. If the two values match
* exactly, move to the next pair of list values, otherwise move to the
* next list value on the side with a smaller list value.
*/
outer_pos = inner_pos = 0;
while (outer_pos < outer_bi->ndatums || inner_pos < inner_bi->ndatums)
@ -1163,8 +1163,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
if (outer_pos < outer_bi->ndatums)
{
/*
* If the partition on the outer side has been proven empty, ignore
* it and move to the next datum on the outer side.
* If the partition on the outer side has been proven empty,
* ignore it and move to the next datum on the outer side.
*/
outer_index = outer_bi->indexes[outer_pos];
if (is_dummy_partition(outer_rel, outer_index))
@ -1176,8 +1176,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
if (inner_pos < inner_bi->ndatums)
{
/*
* If the partition on the inner side has been proven empty, ignore
* it and move to the next datum on the inner side.
* If the partition on the inner side has been proven empty,
* ignore it and move to the next datum on the inner side.
*/
inner_index = inner_bi->indexes[inner_pos];
if (is_dummy_partition(inner_rel, inner_index))
@ -1197,10 +1197,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining values on the side which
* finishes later. For that we set the comparison parameter cmpval in
* such a way that it appears as if the side which finishes earlier has
* an extra value higher than any other value on the unfinished side.
* That way we advance the values on the unfinished side till all of
* its values are exhausted.
* such a way that it appears as if the side which finishes earlier
* has an extra value higher than any other value on the unfinished
* side. That way we advance the values on the unfinished side till
* all of its values are exhausted.
*/
if (outer_pos >= outer_bi->ndatums)
cmpval = 1;
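
The sentinel trick this comment describes works for any single-pass merge of two sorted lists. A self-contained toy version with invented data (the real code compares partition bound datums, not ints):

#include <stdio.h>

int
main(void)
{
	int		a[] = {1, 4, 7};
	int		b[] = {2, 4, 9, 11};
	int		na = 3, nb = 4;
	int		i = 0, j = 0;

	while (i < na || j < nb)
	{
		int		cmpval;

		if (i >= na)
			cmpval = 1;		/* a exhausted: act as if its next value is +inf */
		else if (j >= nb)
			cmpval = -1;	/* b exhausted: act as if its next value is +inf */
		else
			cmpval = (a[i] > b[j]) - (a[i] < b[j]);

		if (cmpval == 0)
		{
			printf("match %d\n", a[i]);
			i++;
			j++;
		}
		else if (cmpval < 0)
		{
			printf("a-only %d\n", a[i]);
			i++;
		}
		else
		{
			printf("b-only %d\n", b[j]);
			j++;
		}
	}
	return 0;
}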
@ -1245,10 +1245,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
Assert(outer_pos < outer_bi->ndatums);
/*
* If the inner side has the default partition, or this is an outer
* join, try to assign a merged partition to the outer partition
* (see process_outer_partition()). Otherwise, the outer partition
* will not contribute to the result.
* If the inner side has the default partition, or this is an
* outer join, try to assign a merged partition to the outer
* partition (see process_outer_partition()). Otherwise, the
* outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
@ -1281,8 +1281,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
* (see process_inner_partition()). Otherwise, the inner partition
* will not contribute to the result.
* (see process_inner_partition()). Otherwise, the inner
* partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
@ -1459,8 +1459,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
* partitions match or not. If the two ranges overlap, move to the next
* pair of ranges, otherwise move to the next range on the side with a
* lower range. outer_lb_pos/inner_lb_pos keep track of the positions of
* lower bounds in the datums arrays in the outer/inner PartitionBoundInfos
* respectively.
* lower bounds in the datums arrays in the outer/inner
* PartitionBoundInfos respectively.
*/
outer_lb_pos = inner_lb_pos = 0;
outer_index = get_range_partition(outer_rel, outer_bi, &outer_lb_pos,
@ -1480,10 +1480,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining ranges on the side which
* finishes later. For that we set the comparison parameter cmpval in
* such a way that it appears as if the side which finishes earlier has
* an extra range higher than any other range on the unfinished side.
* That way we advance the ranges on the unfinished side till all of
* its ranges are exhausted.
* such a way that it appears as if the side which finishes earlier
* has an extra range higher than any other range on the unfinished
* side. That way we advance the ranges on the unfinished side till
* all of its ranges are exhausted.
*/
if (outer_index == -1)
{
@ -1563,10 +1563,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
goto cleanup;
/*
* A row from a non-overlapping portion (if any) of a partition
* on one side might find its join partner in the default
* partition (if any) on the other side, causing the same
* situation as above; give up in that case.
* A row from a non-overlapping portion (if any) of a partition on
* one side might find its join partner in the default partition
* (if any) on the other side, causing the same situation as
* above; give up in that case.
*/
if ((outer_has_default && (lb_cmpval > 0 || ub_cmpval < 0)) ||
(inner_has_default && (lb_cmpval < 0 || ub_cmpval > 0)))
@ -1582,10 +1582,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
outer_map.merged[outer_index] == false);
/*
* If the inner side has the default partition, or this is an outer
* join, try to assign a merged partition to the outer partition
* (see process_outer_partition()). Otherwise, the outer partition
* will not contribute to the result.
* If the inner side has the default partition, or this is an
* outer join, try to assign a merged partition to the outer
* partition (see process_outer_partition()). Otherwise, the
* outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
@ -1621,8 +1621,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
* (see process_inner_partition()). Otherwise, the inner partition
* will not contribute to the result.
* (see process_inner_partition()). Otherwise, the inner
* partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
@ -1647,8 +1647,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
}
/*
* If we assigned a merged partition, add the range bounds and index of
* the merged partition if appropriate.
* If we assigned a merged partition, add the range bounds and index
* of the merged partition if appropriate.
*/
if (merged_index >= 0 && merged_index != default_index)
add_merged_range_bounds(partnatts, partsupfuncs, partcollations,
@ -1766,10 +1766,10 @@ static int
merge_matching_partitions(PartitionMap *outer_map, PartitionMap *inner_map,
int outer_index, int inner_index, int *next_index)
{
int outer_merged_index;
int inner_merged_index;
bool outer_merged;
bool inner_merged;
int outer_merged_index;
int inner_merged_index;
bool outer_merged;
bool inner_merged;
Assert(outer_index >= 0 && outer_index < outer_map->nparts);
outer_merged_index = outer_map->merged_indexes[outer_index];
@ -1839,7 +1839,7 @@ merge_matching_partitions(PartitionMap *outer_map, PartitionMap *inner_map,
*/
if (outer_merged_index == -1 && inner_merged_index == -1)
{
int merged_index = *next_index;
int merged_index = *next_index;
Assert(!outer_merged);
Assert(!inner_merged);
@ -1891,16 +1891,16 @@ process_outer_partition(PartitionMap *outer_map,
int *next_index,
int *default_index)
{
int merged_index = -1;
int merged_index = -1;
Assert(outer_index >= 0);
/*
* If the inner side has the default partition, a row from the outer
* partition might find its join partner in the default partition; try
* merging the outer partition with the default partition. Otherwise, this
* should be an outer join, in which case the outer partition has to be
* scanned all the way anyway; merge the outer partition with a dummy
* merging the outer partition with the default partition. Otherwise,
* this should be an outer join, in which case the outer partition has to
* be scanned all the way anyway; merge the outer partition with a dummy
* partition on the other side.
*/
if (inner_has_default)
@ -1909,9 +1909,10 @@ process_outer_partition(PartitionMap *outer_map,
/*
* If the outer side has the default partition as well, the default
* partition on the inner side will have two matching partitions on the
* other side: the outer partition and the default partition on the
* outer side. Partitionwise join doesn't handle this scenario yet.
* partition on the inner side will have two matching partitions on
* the other side: the outer partition and the default partition on
* the outer side. Partitionwise join doesn't handle this scenario
* yet.
*/
if (outer_has_default)
return -1;
@ -1923,10 +1924,10 @@ process_outer_partition(PartitionMap *outer_map,
return -1;
/*
* If this is a FULL join, the default partition on the inner side
* has to be scanned all the way anyway, so the resulting partition
* will contain all key values from the default partition, which any
* other partition of the join relation will not contain. Thus the
* If this is a FULL join, the default partition on the inner side has
* to be scanned all the way anyway, so the resulting partition will
* contain all key values from the default partition, which any other
* partition of the join relation will not contain. Thus the
* resulting partition will act as the default partition of the join
* relation; record the index in *default_index if not already done.
*/
@ -1972,15 +1973,15 @@ process_inner_partition(PartitionMap *outer_map,
int *next_index,
int *default_index)
{
int merged_index = -1;
int merged_index = -1;
Assert(inner_index >= 0);
/*
* If the outer side has the default partition, a row from the inner
* partition might find its join partner in the default partition; try
* merging the inner partition with the default partition. Otherwise, this
* should be a FULL join, in which case the inner partition has to be
* merging the inner partition with the default partition. Otherwise,
* this should be a FULL join, in which case the inner partition has to be
* scanned all the way anyway; merge the inner partition with a dummy
* partition on the other side.
*/
@ -1990,9 +1991,10 @@ process_inner_partition(PartitionMap *outer_map,
/*
* If the inner side has the default partition as well, the default
* partition on the outer side will have two matching partitions on the
* other side: the inner partition and the default partition on the
* inner side. Partitionwise join doesn't handle this scenario yet.
* partition on the outer side will have two matching partitions on
* the other side: the inner partition and the default partition on
* the inner side. Partitionwise join doesn't handle this scenario
* yet.
*/
if (inner_has_default)
return -1;
@ -2056,8 +2058,8 @@ merge_null_partitions(PartitionMap *outer_map,
int *next_index,
int *null_index)
{
bool consider_outer_null = false;
bool consider_inner_null = false;
bool consider_outer_null = false;
bool consider_inner_null = false;
Assert(outer_has_null || inner_has_null);
Assert(*null_index == -1);
@ -2090,10 +2092,10 @@ merge_null_partitions(PartitionMap *outer_map,
/*
* If this is an outer join, the NULL partition on the outer side has
* to be scanned all the way anyway; merge the NULL partition with a
* dummy partition on the other side. In that case consider_outer_null
* means that the NULL partition only contains NULL values as the key
* values, so the merged partition will do so; treat it as the NULL
* partition of the join relation.
* dummy partition on the other side. In that case
* consider_outer_null means that the NULL partition only contains
* NULL values as the key values, so the merged partition will do so;
* treat it as the NULL partition of the join relation.
*/
if (IS_OUTER_JOIN(jointype))
{
@ -2107,12 +2109,12 @@ merge_null_partitions(PartitionMap *outer_map,
Assert(inner_has_null);
/*
* If this is a FULL join, the NULL partition on the inner side has
* to be scanned all the way anyway; merge the NULL partition with a
* dummy partition on the other side. In that case consider_inner_null
* means that the NULL partition only contains NULL values as the key
* values, so the merged partition will do so; treat it as the NULL
* partition of the join relation.
* If this is a FULL join, the NULL partition on the inner side has to
* be scanned all the way anyway; merge the NULL partition with a
* dummy partition on the other side. In that case
* consider_inner_null means that the NULL partition only contains
* NULL values as the key values, so the merged partition will do so;
* treat it as the NULL partition of the join relation.
*/
if (jointype == JOIN_FULL)
*null_index = merge_partition_with_dummy(inner_map, inner_null,
@ -2166,8 +2168,8 @@ merge_default_partitions(PartitionMap *outer_map,
int *next_index,
int *default_index)
{
int outer_merged_index = -1;
int inner_merged_index = -1;
int outer_merged_index = -1;
int inner_merged_index = -1;
Assert(outer_has_default || inner_has_default);
@ -2188,9 +2190,10 @@ merge_default_partitions(PartitionMap *outer_map,
/*
* If this is an outer join, the default partition on the outer side
* has to be scanned all the way anyway; if we have not yet assigned a
* partition, merge the default partition with a dummy partition on the
* other side. The merged partition will act as the default partition
* of the join relation (see comments in process_inner_partition()).
* partition, merge the default partition with a dummy partition on
* the other side. The merged partition will act as the default
* partition of the join relation (see comments in
* process_inner_partition()).
*/
if (IS_OUTER_JOIN(jointype))
{
@ -2211,11 +2214,12 @@ merge_default_partitions(PartitionMap *outer_map,
else if (!outer_has_default && inner_has_default)
{
/*
* If this is a FULL join, the default partition on the inner side
* has to be scanned all the way anyway; if we have not yet assigned a
* partition, merge the default partition with a dummy partition on the
* other side. The merged partition will act as the default partition
* of the join relation (see comments in process_outer_partition()).
* If this is a FULL join, the default partition on the inner side has
* to be scanned all the way anyway; if we have not yet assigned a
* partition, merge the default partition with a dummy partition on
* the other side. The merged partition will act as the default
* partition of the join relation (see comments in
* process_outer_partition()).
*/
if (jointype == JOIN_FULL)
{
@ -2266,7 +2270,7 @@ merge_default_partitions(PartitionMap *outer_map,
static int
merge_partition_with_dummy(PartitionMap *map, int index, int *next_index)
{
int merged_index = *next_index;
int merged_index = *next_index;
Assert(index >= 0 && index < map->nparts);
Assert(map->merged_indexes[index] == -1);
@ -2346,7 +2350,7 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel,
int *outer_indexes;
int *inner_indexes;
int max_nparts;
int i;
int i;
Assert(nmerged > 0);
Assert(*outer_parts == NIL);
@ -2365,7 +2369,7 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel,
{
if (i < outer_nparts)
{
int merged_index = outer_map->merged_indexes[i];
int merged_index = outer_map->merged_indexes[i];
if (merged_index >= 0)
{
@ -2375,7 +2379,7 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel,
}
if (i < inner_nparts)
{
int merged_index = inner_map->merged_indexes[i];
int merged_index = inner_map->merged_indexes[i];
if (merged_index >= 0)
{
@ -2392,10 +2396,10 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel,
int inner_index = inner_indexes[i];
/*
* If both partitions are dummy, it means the merged partition that had
* been assigned to the outer/inner partition was removed when
* re-merging the outer/inner partition in merge_matching_partitions();
* ignore the merged partition.
* If both partitions are dummy, it means the merged partition that
* had been assigned to the outer/inner partition was removed when
* re-merging the outer/inner partition in
* merge_matching_partitions(); ignore the merged partition.
*/
if (outer_index == -1 && inner_index == -1)
continue;
@ -2484,7 +2488,8 @@ get_range_partition(RelOptInfo *rel,
Assert(bi->strategy == PARTITION_STRATEGY_RANGE);
do {
do
{
part_index = get_range_partition_internal(bi, lb_pos, lb, ub);
if (part_index == -1)
return -1;
@ -2609,7 +2614,7 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
PartitionRangeBound *outer_ub,
PartitionRangeBound *inner_lb,
PartitionRangeBound *inner_ub,
int lb_cmpval, int ub_cmpval,
int lb_cmpval, int ub_cmpval,
PartitionRangeBound *merged_lb,
PartitionRangeBound *merged_ub)
{
@ -2638,8 +2643,8 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
/*
* A LEFT/ANTI join will have all the rows from the outer side, so
* the bounds of the merged partition will be the same as the outer
* bounds.
* the bounds of the merged partition will be the same as the
* outer bounds.
*/
*merged_lb = *outer_lb;
*merged_ub = *outer_ub;
@ -2648,10 +2653,10 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
case JOIN_FULL:
/*
* A FULL join will have all the rows from both sides, so the lower
* bound of the merged partition will be the lower of the two lower
* bounds, and the upper bound of the merged partition will be the
* higher of the two upper bounds.
* A FULL join will have all the rows from both sides, so the
* lower bound of the merged partition will be the lower of the
* two lower bounds, and the upper bound of the merged partition
* will be the higher of the two upper bounds.
*/
*merged_lb = (lb_cmpval < 0) ? *outer_lb : *inner_lb;
*merged_ub = (ub_cmpval > 0) ? *outer_ub : *inner_ub;
@ -2687,7 +2692,7 @@ add_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
}
else
{
PartitionRangeBound prev_ub;
PartitionRangeBound prev_ub;
Assert(*merged_datums);
Assert(*merged_kinds);
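The FULL-join bound arithmetic above is easy to sanity-check in isolation. Below is a minimal standalone sketch of the same rule for plain integer ranges; IntRange and merge_full are invented stand-ins, not the PartitionRangeBound machinery, and the cmpval sign convention is assumed to match the comparisons feeding get_merged_range_bounds.

#include <stdio.h>

/* Hypothetical stand-in for PartitionRangeBound: a closed-open int range. */
typedef struct { int lb; int ub; } IntRange;

/* FULL join keeps rows from both sides: take the lower of the two lower
 * bounds and the higher of the two upper bounds, as the comment says. */
static IntRange
merge_full(IntRange outer, IntRange inner)
{
    IntRange merged;
    int lb_cmpval = (outer.lb > inner.lb) - (outer.lb < inner.lb);
    int ub_cmpval = (outer.ub > inner.ub) - (outer.ub < inner.ub);

    merged.lb = (lb_cmpval < 0) ? outer.lb : inner.lb;
    merged.ub = (ub_cmpval > 0) ? outer.ub : inner.ub;
    return merged;
}

int
main(void)
{
    IntRange m = merge_full((IntRange){10, 20}, (IntRange){5, 15});

    printf("[%d, %d)\n", m.lb, m.ub);   /* prints [5, 20) */
    return 0;
}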

View File

@ -854,8 +854,8 @@ gen_partprune_steps_internal(GeneratePruningStepsContext *context,
ListCell *lc;
/*
* If this partitioned relation has a default partition and is itself
* a partition (as evidenced by partition_qual being not NIL), we first
* If this partitioned relation has a default partition and is itself a
* partition (as evidenced by partition_qual being not NIL), we first
* check if the clauses contradict the partition constraint. If they do,
* there's no need to generate any steps as it'd already be proven that no
* partitions need to be scanned.

View File

@ -3097,7 +3097,7 @@ relation_needs_vacanalyze(Oid relid,
/* Determine if this table needs vacuum or analyze. */
*dovacuum = force_vacuum || (vactuples > vacthresh) ||
(vac_ins_base_thresh >= 0 && instuples > vacinsthresh);
(vac_ins_base_thresh >= 0 && instuples > vacinsthresh);
*doanalyze = (anltuples > anlthresh);
}
else

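For context on the vacthresh/vacinsthresh comparison above: both thresholds follow the usual base-plus-scale-factor shape. A minimal sketch of that arithmetic; the variable names mirror the ones in the hunk, but every number here is made up:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    /* Hypothetical settings and table stats, for illustration only. */
    float reltuples = 1000000.0f;
    int   vac_base_thresh = 50;        /* autovacuum_vacuum_threshold */
    float vac_scale_factor = 0.2f;     /* autovacuum_vacuum_scale_factor */
    int   vac_ins_base_thresh = 1000;  /* -1 would disable insert vacuums */
    float vac_ins_scale_factor = 0.2f;
    float vactuples = 250000.0f;       /* dead tuples */
    float instuples = 90000.0f;        /* tuples inserted since last vacuum */

    float vacthresh = vac_base_thresh + vac_scale_factor * reltuples;
    float vacinsthresh = vac_ins_base_thresh + vac_ins_scale_factor * reltuples;

    bool dovacuum = (vactuples > vacthresh) ||
        (vac_ins_base_thresh >= 0 && instuples > vacinsthresh);

    printf("vacthresh=%.0f vacinsthresh=%.0f dovacuum=%d\n",
           vacthresh, vacinsthresh, dovacuum);  /* 200050 201000 1 */
    return 0;
}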
View File

@ -540,29 +540,29 @@ HandleCheckpointerInterrupts(void)
ProcessConfigFile(PGC_SIGHUP);
/*
* Checkpointer is the last process to shut down, so we ask it to
* hold the keys for a range of other tasks required most of which
* have nothing to do with checkpointing at all.
* Checkpointer is the last process to shut down, so we ask it to hold
* the keys for a range of other tasks required most of which have
* nothing to do with checkpointing at all.
*
* For various reasons, some config values can change dynamically
* so the primary copy of them is held in shared memory to make
* sure all backends see the same value. We make Checkpointer
* responsible for updating the shared memory copy if the
* parameter setting changes because of SIGHUP.
* For various reasons, some config values can change dynamically so
* the primary copy of them is held in shared memory to make sure all
* backends see the same value. We make Checkpointer responsible for
* updating the shared memory copy if the parameter setting changes
* because of SIGHUP.
*/
UpdateSharedMemoryConfig();
}
if (ShutdownRequestPending)
{
/*
* From here on, elog(ERROR) should end with exit(1), not send
* control back to the sigsetjmp block above
* From here on, elog(ERROR) should end with exit(1), not send control
* back to the sigsetjmp block above
*/
ExitOnAnyError = true;
/* Close down the database */
ShutdownXLOG(0, 0);
/* Normal exit from the checkpointer is here */
proc_exit(0); /* done */
proc_exit(0); /* done */
}
}

View File

@ -6235,7 +6235,7 @@ static void
pgstat_recv_resetslrucounter(PgStat_MsgResetslrucounter *msg, int len)
{
int i;
TimestampTz ts = GetCurrentTimestamp();
TimestampTz ts = GetCurrentTimestamp();
for (i = 0; i < SLRU_NUM_ELEMENTS; i++)
{
@ -6292,10 +6292,10 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len)
/*
* It is quite possible that a non-aggressive VACUUM ended up skipping
* various pages, however, we'll zero the insert counter here regardless.
* It's currently used only to track when we need to perform an
* "insert" autovacuum, which are mainly intended to freeze newly inserted
* tuples. Zeroing this may just mean we'll not try to vacuum the table
* again until enough tuples have been inserted to trigger another insert
* It's currently used only to track when we need to perform an "insert"
* autovacuum, which are mainly intended to freeze newly inserted tuples.
* Zeroing this may just mean we'll not try to vacuum the table again
* until enough tuples have been inserted to trigger another insert
* autovacuum. An anti-wraparound autovacuum will catch any persistent
* stragglers.
*/
@ -6687,7 +6687,7 @@ pgstat_clip_activity(const char *raw_activity)
int
pgstat_slru_index(const char *name)
{
int i;
int i;
for (i = 0; i < SLRU_NUM_ELEMENTS; i++)
{

View File

@ -2036,6 +2036,7 @@ retry1:
if (SSLok == 'S' && secure_open_server(port) == -1)
return STATUS_ERROR;
#endif
/*
* regular startup packet, cancel, etc packet should follow, but not
* another SSL negotiation request, and a GSS request should only
@ -2066,6 +2067,7 @@ retry1:
if (GSSok == 'G' && secure_open_gssapi(port) == -1)
return STATUS_ERROR;
#endif
/*
* regular startup packet, cancel, etc packet should follow, but not
* another GSS negotiation request, and an SSL request should only

View File

@ -80,7 +80,7 @@ InitializeBackupManifest(backup_manifest_info *manifest,
void
AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid,
const char *pathname, size_t size, pg_time_t mtime,
pg_checksum_context * checksum_ctx)
pg_checksum_context *checksum_ctx)
{
char pathbuf[MAXPGPATH];
int pathlen;
@ -103,11 +103,11 @@ AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid,
}
/*
* Each file's entry needs to be separated from any entry that follows by a
* comma, but there's no comma before the first one or after the last one.
* To make that work, adding a file to the manifest starts by terminating
* the most recently added line, with a comma if appropriate, but does not
* terminate the line inserted for this file.
* Each file's entry needs to be separated from any entry that follows by
* a comma, but there's no comma before the first one or after the last
* one. To make that work, adding a file to the manifest starts by
* terminating the most recently added line, with a comma if appropriate,
* but does not terminate the line inserted for this file.
*/
initStringInfo(&buf);
if (manifest->first_file)

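The comma-placement scheme described in this comment (terminate the previous entry's line, never the current one) is a standard way to emit a JSON array incrementally. A freestanding sketch, independent of the manifest code; add_entry is a hypothetical helper:

#include <stdbool.h>
#include <stdio.h>

/* Emit one array entry, first closing the previous line with a comma
 * if there was one; the entry itself is left unterminated. */
static void
add_entry(bool *first, const char *path, long size)
{
    if (*first)
        *first = false;
    else
        printf(",\n");
    printf("  { \"path\": \"%s\", \"size\": %ld }", path, size);
}

int
main(void)
{
    bool first = true;

    printf("[\n");
    add_entry(&first, "base/1/1259", 8192);
    add_entry(&first, "base/1/1249", 16384);
    printf("\n]\n");        /* the final entry is terminated only here */
    return 0;
}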
View File

@ -52,7 +52,7 @@ typedef struct LogicalRepPartMapEntry
{
Oid partoid; /* LogicalRepPartMap's key */
LogicalRepRelMapEntry relmapentry;
} LogicalRepPartMapEntry;
} LogicalRepPartMapEntry;
/*
* Relcache invalidation callback for our relation map cache.

View File

@ -692,7 +692,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
if (!publish)
{
bool ancestor_published = false;
bool ancestor_published = false;
/*
* For a partition, check if any of the ancestors are
@ -702,13 +702,16 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
*/
if (am_partition)
{
List *ancestors = get_partition_ancestors(relid);
ListCell *lc2;
List *ancestors = get_partition_ancestors(relid);
ListCell *lc2;
/* Find the "topmost" ancestor that is in this publication. */
/*
* Find the "topmost" ancestor that is in this
* publication.
*/
foreach(lc2, ancestors)
{
Oid ancestor = lfirst_oid(lc2);
Oid ancestor = lfirst_oid(lc2);
if (list_member_oid(GetRelationPublications(ancestor),
pub->oid))

View File

@ -425,10 +425,9 @@ pg_physical_replication_slot_advance(XLogRecPtr moveto)
retlsn = moveto;
/*
* Dirty the slot so as it is written out at the next checkpoint.
* Note that the LSN position advanced may still be lost in the
* event of a crash, but this makes the data consistent after a
* clean shutdown.
* Dirty the slot so as it is written out at the next checkpoint. Note
* that the LSN position advanced may still be lost in the event of a
* crash, but this makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}
@ -532,9 +531,9 @@ pg_logical_replication_slot_advance(XLogRecPtr moveto)
* keep track of their progress, so we should make more of an
* effort to save it for them.
*
* Dirty the slot so it is written out at the next checkpoint.
* The LSN position advanced to may still be lost on a crash
* but this makes the data consistent after a clean shutdown.
* Dirty the slot so it is written out at the next checkpoint. The
* LSN position advanced to may still be lost on a crash but this
* makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}

View File

@ -255,10 +255,10 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
walrcv->conninfo[0] = '\0';
/*
* Use configured replication slot if present, and ignore the value
* of create_temp_slot as the slot name should be persistent. Otherwise,
* use create_temp_slot to determine whether this WAL receiver should
* create a temporary slot by itself and use it, or not.
* Use configured replication slot if present, and ignore the value of
* create_temp_slot as the slot name should be persistent. Otherwise, use
* create_temp_slot to determine whether this WAL receiver should create a
* temporary slot by itself and use it, or not.
*/
if (slotname != NULL && slotname[0] != '\0')
{

View File

@ -346,14 +346,14 @@ WalSndErrorCleanup(void)
void
WalSndResourceCleanup(bool isCommit)
{
ResourceOwner resowner;
ResourceOwner resowner;
if (CurrentResourceOwner == NULL)
return;
/*
* Deleting CurrentResourceOwner is not allowed, so we must save a
* pointer in a local variable and clear it first.
* Deleting CurrentResourceOwner is not allowed, so we must save a pointer
* in a local variable and clear it first.
*/
resowner = CurrentResourceOwner;
CurrentResourceOwner = NULL;

View File

@ -800,7 +800,7 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum)
else if (IsA(clause, ScalarArrayOpExpr))
{
/* If it's an scalar array operator, check for Var IN Const. */
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
/*
* Reject ALL() variant, we only care about ANY/IN.
@ -827,8 +827,9 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum)
/*
* If it's not an "=" operator, just ignore the clause, as it's not
* compatible with functional dependencies. The operator is identified
* simply by looking at which function it uses to estimate selectivity.
* That's a bit strange, but it's what other similar places do.
* simply by looking at which function it uses to estimate
* selectivity. That's a bit strange, but it's what other similar
* places do.
*/
if (get_oprrest(expr->opno) != F_EQSEL)
return false;
@ -929,7 +930,8 @@ static MVDependency *
find_strongest_dependency(MVDependencies **dependencies, int ndependencies,
Bitmapset *attnums)
{
int i, j;
int i,
j;
MVDependency *strongest = NULL;
/* number of attnums in clauses */
@ -967,8 +969,8 @@ find_strongest_dependency(MVDependencies **dependencies, int ndependencies,
/*
* this dependency is stronger, but we must still check that it's
* fully matched to these attnums. We perform this check last as it's
* slightly more expensive than the previous checks.
* fully matched to these attnums. We perform this check last as
* it's slightly more expensive than the previous checks.
*/
if (dependency_is_fully_matched(dependency, attnums))
strongest = dependency; /* save new best match */

View File

@ -75,8 +75,8 @@ static VacAttrStats **lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
static void statext_store(Oid relid,
MVNDistinct *ndistinct, MVDependencies *dependencies,
MCVList *mcv, VacAttrStats **stats);
static int statext_compute_stattarget(int stattarget,
int natts, VacAttrStats **stats);
static int statext_compute_stattarget(int stattarget,
int natts, VacAttrStats **stats);
/*
* Compute requested extended stats, using the rows sampled for the plain
@ -160,9 +160,9 @@ BuildRelationExtStatistics(Relation onerel, double totalrows,
stats);
/*
* Don't rebuild statistics objects with statistics target set to 0 (we
* just leave the existing values around, just like we do for regular
* per-column statistics).
* Don't rebuild statistics objects with statistics target set to 0
* (we just leave the existing values around, just like we do for
* regular per-column statistics).
*/
if (stattarget == 0)
continue;
@ -231,10 +231,10 @@ ComputeExtStatisticsRows(Relation onerel,
foreach(lc, lstats)
{
StatExtEntry *stat = (StatExtEntry *) lfirst(lc);
int stattarget = stat->stattarget;
VacAttrStats **stats;
int nattrs = bms_num_members(stat->columns);
StatExtEntry *stat = (StatExtEntry *) lfirst(lc);
int stattarget = stat->stattarget;
VacAttrStats **stats;
int nattrs = bms_num_members(stat->columns);
/*
* Check if we can build this statistics object based on the columns
@ -291,19 +291,19 @@ ComputeExtStatisticsRows(Relation onerel,
static int
statext_compute_stattarget(int stattarget, int nattrs, VacAttrStats **stats)
{
int i;
int i;
/*
* If there's statistics target set for the statistics object, use it.
* It may be set to 0 which disables building of that statistic.
* If there's statistics target set for the statistics object, use it. It
* may be set to 0 which disables building of that statistic.
*/
if (stattarget >= 0)
return stattarget;
/*
* The target for the statistics object is set to -1, in which case we
* look at the maximum target set for any of the attributes the object
* is defined on.
* look at the maximum target set for any of the attributes the object is
* defined on.
*/
for (i = 0; i < nattrs; i++)
{
@ -1041,8 +1041,8 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
/* Var IN Array */
if (IsA(clause, ScalarArrayOpExpr))
{
RangeTblEntry *rte = root->simple_rte_array[relid];
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
RangeTblEntry *rte = root->simple_rte_array[relid];
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
Var *var;
/* Only expressions with two arguments are considered compatible. */
@ -1287,7 +1287,7 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
ListCell *l;
Bitmapset **list_attnums;
int listidx;
Selectivity sel = 1.0;
Selectivity sel = 1.0;
/* check if there's any stats that might be useful for us. */
if (!has_stats_of_kind(rel->statlist, STATS_EXT_MCV))
@ -1338,7 +1338,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
stat = choose_best_statistics(rel->statlist, STATS_EXT_MCV,
list_attnums, list_length(clauses));
/* if no (additional) matching stats could be found then we've nothing to do */
/*
* if no (additional) matching stats could be found then we've nothing
* to do
*/
if (!stat)
break;
@ -1352,8 +1355,8 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
foreach(l, clauses)
{
/*
* If the clause is compatible with the selected statistics, mark it
* as estimated and add it to the list to estimate.
* If the clause is compatible with the selected statistics, mark
* it as estimated and add it to the list to estimate.
*/
if (list_attnums[listidx] != NULL &&
bms_is_subset(list_attnums[listidx], stat->keys))
@ -1371,15 +1374,15 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
/*
* First compute "simple" selectivity, i.e. without the extended
* statistics, and essentially assuming independence of the
* columns/clauses. We'll then use the various selectivities computed from
* MCV list to improve it.
* columns/clauses. We'll then use the various selectivities computed
* from MCV list to improve it.
*/
simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
jointype, sjinfo, NULL);
jointype, sjinfo, NULL);
/*
* Now compute the multi-column estimate from the MCV list, along with the
* other selectivities (base & total selectivity).
* Now compute the multi-column estimate from the MCV list, along with
* the other selectivities (base & total selectivity).
*/
mcv_sel = mcv_clauselist_selectivity(root, stat, stat_clauses, varRelid,
jointype, sjinfo, rel,
@ -1393,7 +1396,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
if (other_sel > 1.0 - mcv_totalsel)
other_sel = 1.0 - mcv_totalsel;
/* Overall selectivity is the combination of MCV and non-MCV estimates. */
/*
* Overall selectivity is the combination of MCV and non-MCV
* estimates.
*/
stat_sel = mcv_sel + other_sel;
CLAMP_PROBABILITY(stat_sel);
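The clamp above keeps the non-MCV portion of the estimate from exceeding the selectivity mass left uncovered by the MCV list. A small numeric sketch with invented values:

#include <stdio.h>

#define CLAMP_PROBABILITY(p) \
    do { if ((p) < 0.0) (p) = 0.0; else if ((p) > 1.0) (p) = 1.0; } while (0)

int
main(void)
{
    /* Hypothetical inputs: the MCV list covers 70% of the rows. */
    double mcv_sel = 0.25;       /* estimate from matching MCV items */
    double mcv_totalsel = 0.70;  /* total frequency of all MCV items */
    double other_sel = 0.40;     /* estimate for the non-MCV remainder */
    double stat_sel;

    /* The non-MCV part cannot exceed the mass outside the MCV list. */
    if (other_sel > 1.0 - mcv_totalsel)
        other_sel = 1.0 - mcv_totalsel;

    stat_sel = mcv_sel + other_sel;
    CLAMP_PROBABILITY(stat_sel);

    printf("stat_sel = %.2f\n", stat_sel);  /* 0.25 + 0.30 = 0.55 */
    return 0;
}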
@ -1454,11 +1460,11 @@ statext_clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid,
bool
examine_clause_args(List *args, Var **varp, Const **cstp, bool *varonleftp)
{
Var *var;
Const *cst;
bool varonleft;
Node *leftop,
*rightop;
Var *var;
Const *cst;
bool varonleft;
Node *leftop,
*rightop;
/* enforced by statext_is_compatible_clause_internal */
Assert(list_length(args) == 2);
@ -1473,13 +1479,13 @@ examine_clause_args(List *args, Var **varp, Const **cstp, bool *varonleftp)
if (IsA(rightop, RelabelType))
rightop = (Node *) ((RelabelType *) rightop)->arg;
if (IsA(leftop, Var) && IsA(rightop, Const))
if (IsA(leftop, Var) &&IsA(rightop, Const))
{
var = (Var *) leftop;
cst = (Const *) rightop;
varonleft = true;
}
else if (IsA(leftop, Const) && IsA(rightop, Var))
else if (IsA(leftop, Const) &&IsA(rightop, Var))
{
var = (Var *) rightop;
cst = (Const *) leftop;

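The examine_clause_args hunk above follows a common pattern: strip RelabelType wrappers, then accept a Var/Const pair in either order and record which side the Var was on. A freestanding sketch of the same shape with a toy node representation; NodeKind, strip_relabel, and examine_args are all invented for illustration:

#include <stdbool.h>
#include <stdio.h>

typedef enum { T_Var, T_Const, T_Relabel } NodeKind;

typedef struct Node
{
    NodeKind kind;
    struct Node *arg;           /* only used by T_Relabel */
    int value;                  /* toy payload */
} Node;

/* Strip binary-compatible relabel wrappers, as the real code does. */
static Node *
strip_relabel(Node *n)
{
    while (n->kind == T_Relabel)
        n = n->arg;
    return n;
}

/* Accept "Var op Const" in either order; report which side the Var is on. */
static bool
examine_args(Node *left, Node *right, Node **var, Node **cst, bool *varonleft)
{
    left = strip_relabel(left);
    right = strip_relabel(right);

    if (left->kind == T_Var && right->kind == T_Const)
    {
        *var = left;
        *cst = right;
        *varonleft = true;
        return true;
    }
    if (left->kind == T_Const && right->kind == T_Var)
    {
        *var = right;
        *cst = left;
        *varonleft = false;
        return true;
    }
    return false;               /* not a Var-vs-Const comparison */
}

int
main(void)
{
    Node v = {T_Var, NULL, 0};
    Node c = {T_Const, NULL, 42};
    Node r = {T_Relabel, &c, 0};
    Node *var, *cst;
    bool varonleft;

    if (examine_args(&r, &v, &var, &cst, &varonleft))
        printf("const=%d varonleft=%d\n", cst->value, varonleft);  /* 42 0 */
    return 0;
}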
View File

@ -210,8 +210,8 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
groups = build_distinct_groups(nitems, items, mss, &ngroups);
/*
* Maximum number of MCV items to store, based on the statistics target
* we computed for the statistics object (from target set for the object
* Maximum number of MCV items to store, based on the statistics target we
* computed for the statistics object (from target set for the object
* itself, attributes and the system default). In any case, we can't keep
* more groups than we have available.
*/
@ -261,7 +261,7 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
{
int j;
SortItem key;
MultiSortSupport tmp;
MultiSortSupport tmp;
/* frequencies for values in each attribute */
SortItem **freqs;
@ -463,7 +463,7 @@ build_distinct_groups(int numrows, SortItem *items, MultiSortSupport mss,
static int
sort_item_compare(const void *a, const void *b, void *arg)
{
SortSupport ssup = (SortSupport) arg;
SortSupport ssup = (SortSupport) arg;
SortItem *ia = (SortItem *) a;
SortItem *ib = (SortItem *) b;
@ -499,7 +499,7 @@ build_column_frequencies(SortItem *groups, int ngroups,
/* allocate arrays for all columns as a single chunk */
ptr = palloc(MAXALIGN(sizeof(SortItem *) * mss->ndims) +
mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups));
mss->ndims * MAXALIGN(sizeof(SortItem) * ngroups));
/* initial array of pointers */
result = (SortItem **) ptr;
@ -507,7 +507,7 @@ build_column_frequencies(SortItem *groups, int ngroups,
for (dim = 0; dim < mss->ndims; dim++)
{
SortSupport ssup = &mss->ssup[dim];
SortSupport ssup = &mss->ssup[dim];
/* array of values for a single column */
result[dim] = (SortItem *) ptr;
@ -528,15 +528,15 @@ build_column_frequencies(SortItem *groups, int ngroups,
/*
* Identify distinct values, compute frequency (there might be
* multiple MCV items containing this value, so we need to sum
* counts from all of them.
* multiple MCV items containing this value, so we need to sum counts
* from all of them.
*/
ncounts[dim] = 1;
for (i = 1; i < ngroups; i++)
{
if (sort_item_compare(&result[dim][i-1], &result[dim][i], ssup) == 0)
if (sort_item_compare(&result[dim][i - 1], &result[dim][i], ssup) == 0)
{
result[dim][ncounts[dim]-1].count += result[dim][i].count;
result[dim][ncounts[dim] - 1].count += result[dim][i].count;
continue;
}
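The count-summing loop above is the usual merge-equal-neighbours pass over a sorted array. The same shape in isolation, with plain ints standing in for SortItem and the comparator; note that writes only go to positions at or below the read index, so each comparison still sees the original neighbours:

#include <stdio.h>

int
main(void)
{
    /* Sorted (value, count) pairs; equal values may repeat. */
    int vals[]  = {1, 1, 2, 3, 3, 3};
    int count[] = {5, 2, 4, 1, 1, 1};
    int n = 6;
    int ndistinct = 1;
    int i;

    for (i = 1; i < n; i++)
    {
        if (vals[i - 1] == vals[i])
        {
            /* same value as the previous group: fold the count in */
            count[ndistinct - 1] += count[i];
            continue;
        }
        vals[ndistinct] = vals[i];
        count[ndistinct] = count[i];
        ndistinct++;
    }

    for (i = 0; i < ndistinct; i++)
        printf("%d -> %d\n", vals[i], count[i]);  /* 1->7, 2->4, 3->3 */
    return 0;
}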
@ -723,23 +723,23 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
*/
info[dim].nvalues = ndistinct;
if (info[dim].typbyval) /* by-value data types */
if (info[dim].typbyval) /* by-value data types */
{
info[dim].nbytes = info[dim].nvalues * info[dim].typlen;
/*
* We copy the data into the MCV item during deserialization, so
* we don't need to allocate any extra space.
*/
*/
info[dim].nbytes_aligned = 0;
}
else if (info[dim].typlen > 0) /* fixed-length by-ref */
else if (info[dim].typlen > 0) /* fixed-length by-ref */
{
/*
* We don't care about alignment in the serialized data, so we
* pack the data as much as possible. But we also track how much
* data will be needed after deserialization, and in that case
* we need to account for alignment of each item.
* data will be needed after deserialization, and in that case we
* need to account for alignment of each item.
*
* Note: As the items are fixed-length, we could easily compute
* this during deserialization, but we do it here anyway.
@ -765,8 +765,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
/* serialized length (uint32 length + data) */
len = VARSIZE_ANY_EXHDR(values[dim][i]);
info[dim].nbytes += sizeof(uint32); /* length */
info[dim].nbytes += len; /* value (no header) */
info[dim].nbytes += sizeof(uint32); /* length */
info[dim].nbytes += len; /* value (no header) */
/*
* During deserialization we'll build regular varlena values
@ -792,8 +792,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
/* c-strings include terminator, so +1 byte */
len = strlen(DatumGetCString(values[dim][i])) + 1;
info[dim].nbytes += sizeof(uint32); /* length */
info[dim].nbytes += len; /* value */
info[dim].nbytes += sizeof(uint32); /* length */
info[dim].nbytes += len; /* value */
/* space needed for properly aligned deserialized copies */
info[dim].nbytes_aligned += MAXALIGN(len);
@ -809,9 +809,9 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
* whole serialized MCV list (varlena header, MCV header, dimension info
* for each attribute, deduplicated values and items).
*/
total_length = (3 * sizeof(uint32)) /* magic + type + nitems */
+ sizeof(AttrNumber) /* ndimensions */
+ (ndims * sizeof(Oid)); /* attribute types */
total_length = (3 * sizeof(uint32)) /* magic + type + nitems */
+ sizeof(AttrNumber) /* ndimensions */
+ (ndims * sizeof(Oid)); /* attribute types */
/* dimension info */
total_length += ndims * sizeof(DimensionInfo);
@ -954,7 +954,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
info[dim].nvalues, sizeof(Datum),
compare_scalars_simple, &ssup[dim]);
Assert(value != NULL); /* serialization or deduplication error */
Assert(value != NULL); /* serialization or deduplication
* error */
/* compute index within the deduplicated array */
index = (uint16) (value - values[dim]);
@ -1147,8 +1148,8 @@ statext_mcv_deserialize(bytea *data)
* serialized data - it's not aligned properly, and it may disappear while
* we're still using the MCV list, e.g. due to catcache release.
*
* We do care about alignment here, because we will allocate all the pieces
* at once, but then use pointers to different parts.
* We do care about alignment here, because we will allocate all the
* pieces at once, but then use pointers to different parts.
*/
mcvlen = MAXALIGN(offsetof(MCVList, items) + (sizeof(MCVItem) * nitems));
@ -1291,7 +1292,7 @@ statext_mcv_deserialize(bytea *data)
/* finally translate the indexes (for non-NULL only) */
for (dim = 0; dim < ndims; dim++)
{
uint16 index;
uint16 index;
memcpy(&index, ptr, sizeof(uint16));
ptr += sizeof(uint16);
@ -1377,7 +1378,8 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
/* stuff done on every call of the function */
funcctx = SRF_PERCALL_SETUP();
if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more left to send */
if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more
* left to send */
{
Datum values[5];
bool nulls[5];
@ -1400,10 +1402,10 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
{
astate_nulls = accumArrayResult(astate_nulls,
BoolGetDatum(item->isnull[i]),
false,
BOOLOID,
CurrentMemoryContext);
BoolGetDatum(item->isnull[i]),
false,
BOOLOID,
CurrentMemoryContext);
if (!item->isnull[i])
{
@ -1421,17 +1423,17 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
txt = cstring_to_text(DatumGetPointer(val));
astate_values = accumArrayResult(astate_values,
PointerGetDatum(txt),
false,
TEXTOID,
CurrentMemoryContext);
PointerGetDatum(txt),
false,
TEXTOID,
CurrentMemoryContext);
}
else
astate_values = accumArrayResult(astate_values,
(Datum) 0,
true,
TEXTOID,
CurrentMemoryContext);
(Datum) 0,
true,
TEXTOID,
CurrentMemoryContext);
}
values[0] = Int32GetDatum(funcctx->call_cntr);
@ -1606,9 +1608,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
MCVItem *item = &mcvlist->items[i];
/*
* When the MCV item or the Const value is NULL we can treat
* this as a mismatch. We must not call the operator because
* of strictness.
* When the MCV item or the Const value is NULL we can
* treat this as a mismatch. We must not call the operator
* because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{
@ -1631,10 +1633,10 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
*
* We don't store collations used to build the statistics,
* but we can use the collation for the attribute itself,
* as stored in varcollid. We do reset the statistics after
* a type change (including collation change), so this is
* OK. We may need to relax this after allowing extended
* statistics on expressions.
* as stored in varcollid. We do reset the statistics
* after a type change (including collation change), so
* this is OK. We may need to relax this after allowing
* extended statistics on expressions.
*/
if (varonleft)
match = DatumGetBool(FunctionCall2Coll(&opproc,
@ -1654,7 +1656,7 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
}
else if (IsA(clause, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
FmgrInfo opproc;
/* valid only after examine_clause_args returns true */
@ -1707,9 +1709,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
MCVItem *item = &mcvlist->items[i];
/*
* When the MCV item or the Const value is NULL we can treat
* this as a mismatch. We must not call the operator because
* of strictness.
* When the MCV item or the Const value is NULL we can
* treat this as a mismatch. We must not call the operator
* because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{
@ -1727,9 +1729,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
for (j = 0; j < num_elems; j++)
{
Datum elem_value = elem_values[j];
bool elem_isnull = elem_nulls[j];
bool elem_match;
Datum elem_value = elem_values[j];
bool elem_isnull = elem_nulls[j];
bool elem_match;
/* NULL values always evaluate as not matching. */
if (elem_isnull)

View File

@ -2994,7 +2994,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber *forkNum,
bufHdr->tag.forkNum == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
InvalidateBuffer(bufHdr); /* releases spinlock */
InvalidateBuffer(bufHdr); /* releases spinlock */
break;
}
}

View File

@ -287,7 +287,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks)
{
buf = fsm_readbuf(rel, first_removed_address, false);
if (!BufferIsValid(buf))
return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
return InvalidBlockNumber; /* nothing to do; the FSM was already
* smaller */
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/* NO EREPORT(ERROR) from here till changes are logged */
@ -317,7 +318,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks)
{
new_nfsmblocks = fsm_logical_to_physical(first_removed_address);
if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks)
return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
return InvalidBlockNumber; /* nothing to do; the FSM was already
* smaller */
}
return new_nfsmblocks;

View File

@ -1099,9 +1099,9 @@ WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
!PostmasterIsAlive())
{
/*
* The extra PostmasterIsAliveInternal() check prevents false alarms on
* systems that give a different value for getppid() while being traced
* by a debugger.
* The extra PostmasterIsAliveInternal() check prevents false alarms
* on systems that give a different value for getppid() while being
* traced by a debugger.
*/
set->report_postmaster_not_running = true;
}

View File

@ -434,7 +434,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
Assert(pgxact->nxids == 0);
@ -456,7 +456,7 @@ ProcArrayEndTransactionInternal(PGPROC *proc, PGXACT *pgxact,
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */

View File

@ -60,8 +60,8 @@ typedef struct
{
pid_t pss_pid;
sig_atomic_t pss_signalFlags[NUM_PROCSIGNALS];
pg_atomic_uint64 pss_barrierGeneration;
pg_atomic_uint32 pss_barrierCheckMask;
pg_atomic_uint64 pss_barrierGeneration;
pg_atomic_uint32 pss_barrierCheckMask;
} ProcSignalSlot;
/*
@ -72,8 +72,8 @@ typedef struct
*/
typedef struct
{
pg_atomic_uint64 psh_barrierGeneration;
ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
pg_atomic_uint64 psh_barrierGeneration;
ProcSignalSlot psh_slot[FLEXIBLE_ARRAY_MEMBER];
} ProcSignalHeader;
/*
@ -101,7 +101,7 @@ static void ProcessBarrierPlaceholder(void);
Size
ProcSignalShmemSize(void)
{
Size size;
Size size;
size = mul_size(NumProcSignalSlots, sizeof(ProcSignalSlot));
size = add_size(size, offsetof(ProcSignalHeader, psh_slot));
@ -124,7 +124,7 @@ ProcSignalShmemInit(void)
/* If we're first, initialize. */
if (!found)
{
int i;
int i;
pg_atomic_init_u64(&ProcSignal->psh_barrierGeneration, 0);
@ -168,13 +168,13 @@ ProcSignalInit(int pss_idx)
/*
* Initialize barrier state. Since we're a brand-new process, there
* shouldn't be any leftover backend-private state that needs to be
* updated. Therefore, we can broadcast the latest barrier generation
* and disregard any previously-set check bits.
* updated. Therefore, we can broadcast the latest barrier generation and
* disregard any previously-set check bits.
*
* NB: This only works if this initialization happens early enough in the
* startup sequence that we haven't yet cached any state that might need
* to be invalidated. That's also why we have a memory barrier here, to
* be sure that any later reads of memory happen strictly after this.
* to be invalidated. That's also why we have a memory barrier here, to be
* sure that any later reads of memory happen strictly after this.
*/
pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
barrier_generation =
@ -320,16 +320,16 @@ SendProcSignal(pid_t pid, ProcSignalReason reason, BackendId backendId)
uint64
EmitProcSignalBarrier(ProcSignalBarrierType type)
{
uint64 flagbit = UINT64CONST(1) << (uint64) type;
uint64 generation;
uint64 flagbit = UINT64CONST(1) << (uint64) type;
uint64 generation;
/*
* Set all the flags.
*
* Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this
* is totally ordered with respect to anything the caller did before, and
* anything that we do afterwards. (This is also true of the later call
* to pg_atomic_add_fetch_u64.)
* Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
* totally ordered with respect to anything the caller did before, and
* anything that we do afterwards. (This is also true of the later call to
* pg_atomic_add_fetch_u64.)
*/
for (int i = 0; i < NumProcSignalSlots; i++)
{
@ -349,18 +349,18 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
* generation.
*
* Concurrency is not a problem here. Backends that have exited don't
* matter, and new backends that have joined since we entered this function
* must already have current state, since the caller is responsible for
* making sure that the relevant state is entirely visible before calling
* this function in the first place. We still have to wake them up -
* because we can't distinguish between such backends and older backends
* that need to update state - but they won't actually need to change
* any state.
* matter, and new backends that have joined since we entered this
* function must already have current state, since the caller is
* responsible for making sure that the relevant state is entirely visible
* before calling this function in the first place. We still have to wake
* them up - because we can't distinguish between such backends and older
* backends that need to update state - but they won't actually need to
* change any state.
*/
for (int i = NumProcSignalSlots - 1; i >= 0; i--)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
pid_t pid = slot->pss_pid;
pid_t pid = slot->pss_pid;
if (pid != 0)
kill(pid, SIGUSR1);
@ -381,17 +381,17 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
void
WaitForProcSignalBarrier(uint64 generation)
{
long timeout = 125L;
long timeout = 125L;
for (int i = NumProcSignalSlots - 1; i >= 0; i--)
{
volatile ProcSignalSlot *slot = &ProcSignal->psh_slot[i];
uint64 oldval;
uint64 oldval;
oldval = pg_atomic_read_u64(&slot->pss_barrierGeneration);
while (oldval < generation)
{
int events;
int events;
CHECK_FOR_INTERRUPTS();
@ -408,11 +408,11 @@ WaitForProcSignalBarrier(uint64 generation)
}
/*
* The caller is probably calling this function because it wants to
* read the shared state or perform further writes to shared state once
* all backends are known to have absorbed the barrier. However, the
* read of pss_barrierGeneration was performed unlocked; insert a memory
* barrier to separate it from whatever follows.
* The caller is probably calling this function because it wants to read
* the shared state or perform further writes to shared state once all
* backends are known to have absorbed the barrier. However, the read of
* pss_barrierGeneration was performed unlocked; insert a memory barrier
* to separate it from whatever follows.
*/
pg_memory_barrier();
}
@ -428,8 +428,8 @@ WaitForProcSignalBarrier(uint64 generation)
void
ProcessProcSignalBarrier(void)
{
uint64 generation;
uint32 flags;
uint64 generation;
uint32 flags;
/* Exit quickly if there's no work to do. */
if (!ProcSignalBarrierPending)
@ -437,8 +437,8 @@ ProcessProcSignalBarrier(void)
ProcSignalBarrierPending = false;
/*
* Read the current barrier generation, and then get the flags that
* are set for this backend. Note that pg_atomic_exchange_u32 is a full
* Read the current barrier generation, and then get the flags that are
* set for this backend. Note that pg_atomic_exchange_u32 is a full
* barrier, so we're guaranteed that the read of the barrier generation
* happens before we atomically extract the flags, and that any subsequent
* state changes happen afterward.
@ -477,8 +477,8 @@ ProcessBarrierPlaceholder(void)
* machinery gets committed. Rename PROCSIGNAL_BARRIER_PLACEHOLDER to
* PROCSIGNAL_BARRIER_SOMETHING_ELSE where SOMETHING_ELSE is something
* appropriately descriptive. Get rid of this function and instead have
* ProcessBarrierSomethingElse. Most likely, that function should live
* in the file pertaining to that subsystem, rather than here.
* ProcessBarrierSomethingElse. Most likely, that function should live in
* the file pertaining to that subsystem, rather than here.
*/
}
@ -515,8 +515,8 @@ CheckProcSignalBarrier(void)
if (slot != NULL)
{
uint64 mygen;
uint64 curgen;
uint64 mygen;
uint64 curgen;
mygen = pg_atomic_read_u64(&slot->pss_barrierGeneration);
curgen = pg_atomic_read_u64(&ProcSignal->psh_barrierGeneration);
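As a rough illustration of the generation scheme used throughout this file: the emitter sets flag bits, bumps a shared generation counter, and then polls each slot until the slot's published generation catches up. The sketch below compresses that to one worker and one slot using C11 atomics and pthreads; it is a simplification for illustration, not the ProcSignal protocol itself, and it glosses over the memory-barrier subtleties the comments above discuss.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uint_fast64_t shared_generation;  /* bumped by the emitter */
static atomic_uint_fast64_t slot_generation;    /* published by the worker */
static atomic_uint_fast32_t slot_flags;         /* pending barrier types */

static void *
worker(void *arg)
{
    (void) arg;
    for (;;)
    {
        /* Read the generation first, then atomically take the flags, so
         * the generation we publish is never newer than what we absorbed. */
        uint_fast64_t gen = atomic_load(&shared_generation);
        uint_fast32_t flags = atomic_exchange(&slot_flags, 0);

        if (flags & 1)
            printf("worker: absorbed barrier flags\n");

        atomic_store(&slot_generation, gen);
        if (gen >= 1)
            return NULL;
        usleep(1000);
    }
}

int
main(void)
{
    pthread_t th;
    uint_fast64_t gen;

    pthread_create(&th, NULL, worker, NULL);

    /* Emit: set the flag bit first, then advance the generation. */
    atomic_fetch_or(&slot_flags, 1);
    gen = atomic_fetch_add(&shared_generation, 1) + 1;

    /* Wait until the worker has caught up to our generation. */
    while (atomic_load(&slot_generation) < gen)
        usleep(1000);

    printf("emitter: barrier %llu absorbed\n", (unsigned long long) gen);
    pthread_join(th, NULL);
    return 0;
}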

View File

@ -461,7 +461,7 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
}
else
{
Size allocated_size;
Size allocated_size;
/* It isn't in the table yet. allocate and initialize it */
structPtr = ShmemAllocRaw(size, &allocated_size);
@ -539,7 +539,7 @@ pg_get_shmem_allocations(PG_FUNCTION_ARGS)
MemoryContext oldcontext;
HASH_SEQ_STATUS hstat;
ShmemIndexEnt *ent;
Size named_allocated = 0;
Size named_allocated = 0;
Datum values[PG_GET_SHMEM_SIZES_COLS];
bool nulls[PG_GET_SHMEM_SIZES_COLS];

View File

@ -1035,7 +1035,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
found_conflict = true;
else
found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
lock, proclock);
lock, proclock);
if (!found_conflict)
{

View File

@ -553,7 +553,7 @@ smgrnblocks(SMgrRelation reln, ForkNumber forknum)
void
smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks)
{
int i;
int i;
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
@ -580,11 +580,11 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
/*
* We might as well update the local smgr_fsm_nblocks and
* smgr_vm_nblocks settings. The smgr cache inval message that
* this function sent will cause other backends to invalidate
* their copies of smgr_fsm_nblocks and smgr_vm_nblocks,
* and these ones too at the next command boundary.
* But these ensure they aren't outright wrong until then.
* smgr_vm_nblocks settings. The smgr cache inval message that this
* function sent will cause other backends to invalidate their copies
* of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the
* next command boundary. But these ensure they aren't outright wrong
* until then.
*/
if (forknum[i] == FSM_FORKNUM)
reln->smgr_fsm_nblocks = nblocks[i];

View File

@ -224,8 +224,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
/*
* Surprisingly, ALTER SYSTEM meets all our definitions of
* read-only: it changes nothing that affects the output of
* pg_dump, it doesn't write WAL or imperil the application
* of future WAL, and it doesn't depend on any state that needs
* pg_dump, it doesn't write WAL or imperil the application of
* future WAL, and it doesn't depend on any state that needs
* to be synchronized with parallel workers.
*
* So, despite the fact that it writes to a file, it's read
@ -271,10 +271,10 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_VariableSetStmt:
{
/*
* These modify only backend-local state, so they're OK to
* run in a read-only transaction or on a standby. However,
* they are disallowed in parallel mode, because they either
* rely upon or modify backend-local state that might not be
* These modify only backend-local state, so they're OK to run
* in a read-only transaction or on a standby. However, they
* are disallowed in parallel mode, because they either rely
* upon or modify backend-local state that might not be
* synchronized among cooperating backends.
*/
return COMMAND_OK_IN_RECOVERY | COMMAND_OK_IN_READ_ONLY_TXN;
@ -285,8 +285,9 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_VacuumStmt:
{
/*
* These commands write WAL, so they're not strictly read-only,
* and running them in parallel workers isn't supported.
* These commands write WAL, so they're not strictly
* read-only, and running them in parallel workers isn't
* supported.
*
* However, they don't change the database state in a way that
* would affect pg_dump output, so it's fine to run them in a
@ -299,11 +300,11 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_CopyStmt:
{
CopyStmt *stmt = (CopyStmt *) parsetree;
CopyStmt *stmt = (CopyStmt *) parsetree;
/*
* You might think that COPY FROM is not at all read only,
* but it's OK to copy into a temporary table, because that
* You might think that COPY FROM is not at all read only, but
* it's OK to copy into a temporary table, because that
* wouldn't change the output of pg_dump. If the target table
* turns out to be non-temporary, DoCopy itself will call
* PreventCommandIfReadOnly.
@ -318,8 +319,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_VariableShowStmt:
{
/*
* These commands don't modify any data and are safe to run
* in a parallel worker.
* These commands don't modify any data and are safe to run in
* a parallel worker.
*/
return COMMAND_IS_STRICTLY_READ_ONLY;
}
@ -329,8 +330,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
{
/*
* NOTIFY requires an XID assignment, so it can't be permitted
* on a standby. Perhaps LISTEN could, since without NOTIFY
* it would be OK to just do nothing, at least until promotion,
* on a standby. Perhaps LISTEN could, since without NOTIFY it
* would be OK to just do nothing, at least until promotion,
* but we currently prohibit it lest the user get the wrong
* idea.
*
@ -342,11 +343,12 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_LockStmt:
{
LockStmt *stmt = (LockStmt *) parsetree;
LockStmt *stmt = (LockStmt *) parsetree;
/*
* Only weaker locker modes are allowed during recovery. The
* restrictions here must match those in LockAcquireExtended().
* restrictions here must match those in
* LockAcquireExtended().
*/
if (stmt->mode > RowExclusiveLock)
return COMMAND_OK_IN_READ_ONLY_TXN;
@ -359,10 +361,10 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
TransactionStmt *stmt = (TransactionStmt *) parsetree;
/*
* PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all
* write WAL, so they're not read-only in the strict sense;
* but the first and third do not change pg_dump output, so
* they're OK in a read-only transactions.
* PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all write
* WAL, so they're not read-only in the strict sense; but the
* first and third do not change pg_dump output, so they're OK
* in a read-only transactions.
*
* We also consider COMMIT PREPARED to be OK in a read-only
* transaction environment, by way of exception.

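The return values in this switch combine per-context permission bits. A tiny sketch of that flag scheme; the bit values and the exact composition of COMMAND_IS_STRICTLY_READ_ONLY here are assumptions for illustration (the real definitions live in the utility headers):

#include <stdio.h>

/* Hypothetical bit assignments, invented for this sketch. */
#define COMMAND_OK_IN_READ_ONLY_TXN  0x0001
#define COMMAND_OK_IN_PARALLEL_MODE  0x0002
#define COMMAND_OK_IN_RECOVERY       0x0004
#define COMMAND_IS_STRICTLY_READ_ONLY \
    (COMMAND_OK_IN_READ_ONLY_TXN | COMMAND_OK_IN_PARALLEL_MODE | \
     COMMAND_OK_IN_RECOVERY)

int
main(void)
{
    /* e.g. a SET-style command: fine in recovery and in read-only
     * transactions, but disallowed in parallel mode. */
    int flags = COMMAND_OK_IN_RECOVERY | COMMAND_OK_IN_READ_ONLY_TXN;

    if (!(flags & COMMAND_OK_IN_PARALLEL_MODE))
        printf("rejected in a parallel worker\n");
    return 0;
}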
View File

@ -3862,7 +3862,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
case USE_XSD_DATES:
/* compatible with ISO date formats */
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = '-';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '-';
@ -3885,7 +3885,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
}
*str++ = '/';
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
case USE_GERMAN_DATES:
@ -3895,7 +3895,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '.';
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
case USE_POSTGRES_DATES:
@ -3915,7 +3915,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
}
*str++ = '-';
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
break;
}
@ -3985,7 +3985,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char
case USE_XSD_DATES:
/* Compatible with ISO-8601 date formats */
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = '-';
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '-';
@ -4016,7 +4016,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char
}
*str++ = '/';
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = ' ';
str = pg_ultostr_zeropad(str, tm->tm_hour, 2);
*str++ = ':';
@ -4048,7 +4048,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char
str = pg_ultostr_zeropad(str, tm->tm_mon, 2);
*str++ = '.';
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
*str++ = ' ';
str = pg_ultostr_zeropad(str, tm->tm_hour, 2);
*str++ = ':';
@ -4098,7 +4098,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, bool print_tz, int tz, const char
str = AppendTimestampSeconds(str, tm, fsec);
*str++ = ' ';
str = pg_ultostr_zeropad(str,
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
(tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1), 4);
if (print_tz)
{

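The repeated (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1) expression converts internal astronomical year numbering, where year 0 is 1 BC, into the year digits displayed before a BC suffix. A quick standalone check:

#include <stdio.h>

static int
display_year(int tm_year)
{
    return (tm_year > 0) ? tm_year : -(tm_year - 1);
}

int
main(void)
{
    printf("%d\n", display_year(2020));  /* 2020 (AD) */
    printf("%d\n", display_year(0));     /* 1  -> "0001 BC" */
    printf("%d\n", display_year(-44));   /* 45 -> "0045 BC" */
    return 0;
}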
View File

@ -1213,8 +1213,9 @@ int2abs(PG_FUNCTION_ARGS)
static int32
int4gcd_internal(int32 arg1, int32 arg2)
{
int32 swap;
int32 a1, a2;
int32 swap;
int32 a1,
a2;
/*
* Put the greater absolute value in arg1.
@ -1273,9 +1274,9 @@ int4gcd_internal(int32 arg1, int32 arg2)
Datum
int4gcd(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
int32 arg2 = PG_GETARG_INT32(1);
int32 result;
int32 arg1 = PG_GETARG_INT32(0);
int32 arg2 = PG_GETARG_INT32(1);
int32 result;
result = int4gcd_internal(arg1, arg2);
@ -1288,10 +1289,10 @@ int4gcd(PG_FUNCTION_ARGS)
Datum
int4lcm(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
int32 arg2 = PG_GETARG_INT32(1);
int32 gcd;
int32 result;
int32 arg1 = PG_GETARG_INT32(0);
int32 arg2 = PG_GETARG_INT32(1);
int32 gcd;
int32 result;
/*
* Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a

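For reference, a self-contained sketch of the gcd/lcm shape used here, including the lcm(x, 0) = 0 special case the comment refers to (it also dodges dividing by gcd(0, 0) == 0). Overflow handling, which the real functions do carefully, is omitted:

#include <stdint.h>
#include <stdio.h>

/* Euclid's algorithm, returning the absolute value; overflow handling
 * (e.g. gcd(INT32_MIN, 0)) is deliberately left out of this sketch. */
static int32_t
gcd32(int32_t a, int32_t b)
{
    while (b != 0)
    {
        int32_t tmp = a % b;

        a = b;
        b = tmp;
    }
    return (a < 0) ? -a : a;
}

static int32_t
lcm32(int32_t a, int32_t b)
{
    /* lcm(x, 0) = lcm(0, x) = 0; also avoids gcd(0, 0) == 0 below. */
    if (a == 0 || b == 0)
        return 0;

    /* Divide first to keep the intermediate result small. */
    return (a / gcd32(a, b)) * b;
}

int
main(void)
{
    printf("gcd(54, 24) = %d\n", gcd32(54, 24));  /* 6 */
    printf("lcm(4, 6)   = %d\n", lcm32(4, 6));    /* 12 */
    printf("lcm(7, 0)   = %d\n", lcm32(7, 0));    /* 0 */
    return 0;
}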
View File

@ -684,8 +684,9 @@ int8mod(PG_FUNCTION_ARGS)
static int64
int8gcd_internal(int64 arg1, int64 arg2)
{
int64 swap;
int64 a1, a2;
int64 swap;
int64 a1,
a2;
/*
* Put the greater absolute value in arg1.
@ -744,9 +745,9 @@ int8gcd_internal(int64 arg1, int64 arg2)
Datum
int8gcd(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 result;
result = int8gcd_internal(arg1, arg2);
@ -759,10 +760,10 @@ int8gcd(PG_FUNCTION_ARGS)
Datum
int8lcm(PG_FUNCTION_ARGS)
{
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 gcd;
int64 result;
int64 arg1 = PG_GETARG_INT64(0);
int64 arg2 = PG_GETARG_INT64(1);
int64 gcd;
int64 result;
/*
* Handle lcm(x, 0) = lcm(0, x) = 0 as a special case. This prevents a

View File

@ -1337,7 +1337,7 @@ json_typeof(PG_FUNCTION_ARGS)
JsonLexContext *lex;
JsonTokenType tok;
char *type;
JsonParseErrorType result;
JsonParseErrorType result;
json = PG_GETARG_TEXT_PP(0);
lex = makeJsonLexContext(json, false);

View File

@ -496,7 +496,7 @@ static void transform_string_values_scalar(void *state, char *token, JsonTokenTy
void
pg_parse_json_or_ereport(JsonLexContext *lex, JsonSemAction *sem)
{
JsonParseErrorType result;
JsonParseErrorType result;
result = pg_parse_json(lex, sem);
if (result != JSON_SUCCESS)
@ -4524,8 +4524,8 @@ jsonb_set_lax(PG_FUNCTION_ARGS)
/* ArrayType *path = PG_GETARG_ARRAYTYPE_P(1); */
/* Jsonb *newval = PG_GETARG_JSONB_P(2); */
/* bool create = PG_GETARG_BOOL(3); */
text *handle_null;
char *handle_val;
text *handle_null;
char *handle_val;
if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(3))
PG_RETURN_NULL();
@ -4537,13 +4537,13 @@ jsonb_set_lax(PG_FUNCTION_ARGS)
errmsg("null_value_treatment must be \"delete_key\", \"return_target\", \"use_json_null\", or \"raise_exception\"")));
/* if the new value isn't an SQL NULL just call jsonb_set */
if (! PG_ARGISNULL(2))
if (!PG_ARGISNULL(2))
return jsonb_set(fcinfo);
handle_null = PG_GETARG_TEXT_P(4);
handle_val = text_to_cstring(handle_null);
if (strcmp(handle_val,"raise_exception") == 0)
if (strcmp(handle_val, "raise_exception") == 0)
{
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
@ -4554,7 +4554,7 @@ jsonb_set_lax(PG_FUNCTION_ARGS)
}
else if (strcmp(handle_val, "use_json_null") == 0)
{
Datum newval;
Datum newval;
newval = DirectFunctionCall1(jsonb_in, CStringGetDatum("null"));
@ -4569,6 +4569,7 @@ jsonb_set_lax(PG_FUNCTION_ARGS)
else if (strcmp(handle_val, "return_target") == 0)
{
Jsonb *in = PG_GETARG_JSONB_P(0);
PG_RETURN_JSONB_P(in);
}
else

View File

@ -45,11 +45,13 @@ static inline int
decimalLength32(const uint32 v)
{
int t;
static uint32 PowersOfTen[] =
{1, 10, 100,
1000, 10000, 100000,
1000000, 10000000, 100000000,
1000000000};
static const uint32 PowersOfTen[] = {
1, 10, 100,
1000, 10000, 100000,
1000000, 10000000, 100000000,
1000000000
};
/*
* Compute base-10 logarithm by dividing the base-2 logarithm by a
* good-enough approximation of the base-2 logarithm of 10
@ -62,16 +64,16 @@ static inline int
decimalLength64(const uint64 v)
{
int t;
static uint64 PowersOfTen[] = {
UINT64CONST(1), UINT64CONST(10),
UINT64CONST(100), UINT64CONST(1000),
UINT64CONST(10000), UINT64CONST(100000),
UINT64CONST(1000000), UINT64CONST(10000000),
UINT64CONST(100000000), UINT64CONST(1000000000),
UINT64CONST(10000000000), UINT64CONST(100000000000),
UINT64CONST(1000000000000), UINT64CONST(10000000000000),
UINT64CONST(100000000000000), UINT64CONST(1000000000000000),
UINT64CONST(10000000000000000), UINT64CONST(100000000000000000),
static const uint64 PowersOfTen[] = {
UINT64CONST(1), UINT64CONST(10),
UINT64CONST(100), UINT64CONST(1000),
UINT64CONST(10000), UINT64CONST(100000),
UINT64CONST(1000000), UINT64CONST(10000000),
UINT64CONST(100000000), UINT64CONST(1000000000),
UINT64CONST(10000000000), UINT64CONST(100000000000),
UINT64CONST(1000000000000), UINT64CONST(10000000000000),
UINT64CONST(100000000000000), UINT64CONST(1000000000000000),
UINT64CONST(10000000000000000), UINT64CONST(100000000000000000),
UINT64CONST(1000000000000000000), UINT64CONST(10000000000000000000)
};
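The decimalLength functions pair that lookup table with a cheap base-2 logarithm: multiply the bit length by 1233/4096 (a good-enough approximation of log10(2)), then correct by one after comparing against the corresponding power of ten. A standalone uint32 version, assuming a GCC-style __builtin_clz in place of pg_leftmost_one_pos32:

#include <stdint.h>
#include <stdio.h>

static int
decimal_length32(uint32_t v)
{
    static const uint32_t PowersOfTen[] = {
        1, 10, 100,
        1000, 10000, 100000,
        1000000, 10000000, 100000000,
        1000000000
    };
    /* Index of the highest set bit; v|1 keeps __builtin_clz defined at 0. */
    int b = 31 - __builtin_clz(v | 1);
    /* 1233/4096 is a good-enough approximation of log10(2). */
    int t = (b + 1) * 1233 / 4096;

    return t + (v >= PowersOfTen[t]);
}

int
main(void)
{
    printf("%d %d %d\n",
           decimal_length32(7),             /* 1 */
           decimal_length32(1000),          /* 4 */
           decimal_length32(4294967295u));  /* 10 */
    return 0;
}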

View File

@ -1698,12 +1698,12 @@ Datum
pg_stat_get_slru(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_SLRU_COLS 9
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
Tuplestorestate *tupstore;
MemoryContext per_query_ctx;
MemoryContext oldcontext;
int i;
MemoryContext per_query_ctx;
MemoryContext oldcontext;
int i;
PgStat_SLRUStats *stats;
/* check to see if caller supports us returning a tuplestore */
@ -1733,12 +1733,12 @@ pg_stat_get_slru(PG_FUNCTION_ARGS)
/* request SLRU stats from the stat collector */
stats = pgstat_fetch_slru();
for (i = 0; ; i++)
for (i = 0;; i++)
{
/* for each row */
Datum values[PG_STAT_GET_SLRU_COLS];
bool nulls[PG_STAT_GET_SLRU_COLS];
PgStat_SLRUStats stat = stats[i];
PgStat_SLRUStats stat = stats[i];
const char *name;
name = pgstat_slru_name(i);

View File

@ -47,11 +47,11 @@ static int16 getQuadrant(TypeCacheEntry *typcache, const RangeType *centroid,
const RangeType *tst);
static int bound_cmp(const void *a, const void *b, void *arg);
static int adjacent_inner_consistent(TypeCacheEntry *typcache,
const RangeBound *arg, const RangeBound *centroid,
const RangeBound *prev);
static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg,
const RangeBound *centroid);
static int adjacent_inner_consistent(TypeCacheEntry *typcache,
const RangeBound *arg, const RangeBound *centroid,
const RangeBound *prev);
static int adjacent_cmp_bounds(TypeCacheEntry *typcache, const RangeBound *arg,
const RangeBound *centroid);
/*
* SP-GiST 'config' interface function.

View File

@ -1152,7 +1152,8 @@ regcollationout(PG_FUNCTION_ARGS)
char *nspname;
/*
* Would this collation be found by regcollationin? If not, qualify it.
* Would this collation be found by regcollationin? If not,
* qualify it.
*/
if (CollationIsVisible(collationid))
nspname = NULL;

View File

@ -10611,7 +10611,7 @@ generate_opclass_name(Oid opclass)
initStringInfo(&buf);
get_opclass_name(opclass, InvalidOid, &buf);
return &buf.data[1]; /* get_opclass_name() prepends space */
return &buf.data[1]; /* get_opclass_name() prepends space */
}
/*
@ -11313,8 +11313,8 @@ get_reloptions(StringInfo buf, Datum reloptions)
char *value;
/*
* Each array element should have the form name=value. If the "="
* is missing for some reason, treat it like an empty value.
* Each array element should have the form name=value. If the "=" is
* missing for some reason, treat it like an empty value.
*/
name = option;
separator = strchr(option, '=');
@ -11332,11 +11332,11 @@ get_reloptions(StringInfo buf, Datum reloptions)
/*
* In general we need to quote the value; but to avoid unnecessary
* clutter, do not quote if it is an identifier that would not
* need quoting. (We could also allow numbers, but that is a bit
* trickier than it looks --- for example, are leading zeroes
* significant? We don't want to assume very much here about what
* custom reloptions might mean.)
* clutter, do not quote if it is an identifier that would not need
* quoting. (We could also allow numbers, but that is a bit trickier
* than it looks --- for example, are leading zeroes significant? We
* don't want to assume very much here about what custom reloptions
* might mean.)
*/
if (quote_identifier(value) == value)
appendStringInfoString(buf, value);

View File

@ -307,7 +307,7 @@ checkcondition_arr(void *checkval, QueryOperand *val, ExecPhraseData *data)
static bool
checkcondition_bit(void *checkval, QueryOperand *val, ExecPhraseData *data)
{
void *key = (SignTSVector *) checkval;
void *key = (SignTSVector *) checkval;
/*
* we are not able to find a prefix in signature tree
@ -499,8 +499,8 @@ hemdistsign(BITVECP a, BITVECP b, int siglen)
static int
hemdist(SignTSVector *a, SignTSVector *b)
{
int siglena = GETSIGLEN(a);
int siglenb = GETSIGLEN(b);
int siglena = GETSIGLEN(a);
int siglenb = GETSIGLEN(b);
if (ISALLTRUE(a))
{
@ -721,9 +721,9 @@ gtsvector_picksplit(PG_FUNCTION_ARGS)
else
size_alpha = SIGLENBIT(siglen) -
sizebitvec((cache[j].allistrue) ?
GETSIGN(datum_l) :
GETSIGN(cache[j].sign),
siglen);
GETSIGN(datum_l) :
GETSIGN(cache[j].sign),
siglen);
}
else
size_alpha = hemdistsign(cache[j].sign, GETSIGN(datum_l), siglen);

View File

@ -937,7 +937,7 @@ get_attoptions(Oid relid, int16 attnum)
if (isnull)
result = (Datum) 0;
else
result = datumCopy(attopts, false, -1); /* text[] */
result = datumCopy(attopts, false, -1); /* text[] */
ReleaseSysCache(tuple);
@ -3297,9 +3297,9 @@ get_index_column_opclass(Oid index_oid, int attno)
bool
get_index_isreplident(Oid index_oid)
{
HeapTuple tuple;
Form_pg_index rd_index;
bool result;
HeapTuple tuple;
Form_pg_index rd_index;
bool result;
tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(index_oid));
if (!HeapTupleIsValid(tuple))

Some files were not shown because too many files have changed in this diff.