Initial pgindent and pgperltidy run for v13.

Includes some manual cleanup of places that pgindent messed up,
most of which didn't follow project style anyway.

Notably, it seems some people didn't absorb the style rules of
commit c9d297751: there were a number of new occurrences of
function calls with a newline immediately after the left paren,
all written with faulty expectations about how the rest of the
call would be indented.
This commit is contained in:
Tom Lane 2020-05-14 13:06:38 -04:00
parent 1255466f83
commit 5cbfce562f
198 changed files with 2019 additions and 1786 deletions

View File

@ -2681,6 +2681,7 @@ JumbleRowMarks(pgssJumbleState *jstate, List *rowMarks)
foreach(lc, rowMarks)
{
RowMarkClause *rowmark = lfirst_node(RowMarkClause, lc);
if (!rowmark->pushedDown)
{
APP_JUMB(rowmark->rti);

View File

@ -303,8 +303,8 @@ connect_pg_server(ForeignServer *server, UserMapping *user)
/*
* Check that non-superuser has used password to establish connection;
* otherwise, he's piggybacking on the postgres server's user
* identity. See also dblink_security_check() in contrib/dblink
* and check_conn_params.
* identity. See also dblink_security_check() in contrib/dblink and
* check_conn_params.
*/
if (!superuser_arg(user->userid) && UserMappingPasswordRequired(user) &&
!PQconnectionUsedPassword(conn))
@ -361,6 +361,7 @@ UserMappingPasswordRequired(UserMapping *user)
foreach(cell, user->options)
{
DefElem *def = (DefElem *) lfirst(cell);
if (strcmp(def->defname, "password_required") == 0)
return defGetBoolean(def);
}

View File

@ -149,8 +149,8 @@ postgres_fdw_validator(PG_FUNCTION_ARGS)
/*
* Only the superuser may set this option on a user mapping, or
* alter a user mapping on which this option is set. We allow a
* user to clear this option if it's set - in fact, we don't have a
* choice since we can't see the old mapping when validating an
* user to clear this option if it's set - in fact, we don't have
* a choice since we can't see the old mapping when validating an
* alter.
*/
if (!superuser() && !pw_required)
@ -204,11 +204,11 @@ InitPgFdwOptions(void)
{"fetch_size", ForeignServerRelationId, false},
{"fetch_size", ForeignTableRelationId, false},
{"password_required", UserMappingRelationId, false},
/*
* sslcert and sslkey are in fact libpq options, but we repeat them
* here to allow them to appear in both foreign server context
* (when we generate libpq options) and user mapping context
* (from here).
* here to allow them to appear in both foreign server context (when
* we generate libpq options) and user mapping context (from here).
*/
{"sslcert", UserMappingRelationId, true},
{"sslkey", UserMappingRelationId, true},

View File

@ -55,7 +55,9 @@ while (<$feat>)
print " <entry>$feature_id</entry>\n";
}
print " <entry>",
defined($feature_packages{$feature_id}) ? $feature_packages{$feature_id} : "",
defined($feature_packages{$feature_id})
? $feature_packages{$feature_id}
: "",
"</entry>\n";
if ($subfeature_id)
{

View File

@ -220,8 +220,8 @@ detoast_attr_slice(struct varlena *attr,
/*
* For compressed values, we need to fetch enough slices to decompress
* at least the requested part (when a prefix is requested). Otherwise,
* just fetch all slices.
* at least the requested part (when a prefix is requested).
* Otherwise, just fetch all slices.
*/
if (slicelength > 0 && sliceoffset >= 0)
{
@ -343,7 +343,8 @@ toast_fetch_datum(struct varlena *attr)
SET_VARSIZE(result, attrsize + VARHDRSZ);
if (attrsize == 0)
return result; /* Probably shouldn't happen, but just in case. */
return result; /* Probably shouldn't happen, but just in
* case. */
/*
* Open the toast relation and its indexes
@ -387,9 +388,9 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset,
VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);
/*
* It's nonsense to fetch slices of a compressed datum unless when it's
* a prefix -- this isn't lo_* we can't return a compressed datum which
* is meaningful to toast later.
* It's nonsense to fetch slices of a compressed datum unless when it's a
* prefix -- this isn't lo_* we can't return a compressed datum which is
* meaningful to toast later.
*/
Assert(!VARATT_EXTERNAL_IS_COMPRESSED(toast_pointer) || 0 == sliceoffset);

View File

@ -1347,8 +1347,8 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
left->buf, right->buf, false, false))
{
/*
* If the parent page was split, the existing downlink might
* have moved.
* If the parent page was split, the existing downlink might have
* moved.
*/
stack->downlinkoffnum = InvalidOffsetNumber;
}
@ -1371,7 +1371,8 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
stack->downlinkoffnum,
left->buf, right->buf,
true, /* Unlock parent */
unlockbuf /* Unlock stack->buffer if caller wants that */
unlockbuf /* Unlock stack->buffer if caller wants
* that */
))
{
/*

View File

@ -144,6 +144,7 @@ _hash_spareindex(uint32 num_bucket)
{
uint32 splitpoint_group;
uint32 splitpoint_phases;
splitpoint_group = pg_ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)

View File

@ -2153,8 +2153,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
/*
* Note that heap_multi_insert is not used for catalog tuples yet,
* but this will cover the gap once that is the case.
* Note that heap_multi_insert is not used for catalog tuples yet, but
* this will cover the gap once that is the case.
*/
if (needwal && need_cids)
log_heap_new_cid(relation, heaptuples[ndone]);

View File

@ -1566,7 +1566,8 @@ _bt_pagedel(Relation rel, Buffer leafbuf, TransactionId *oldestBtpoXact)
BTScanInsert itup_key;
ItemId itemid;
IndexTuple targetkey;
BlockNumber leftsib, leafblkno;
BlockNumber leftsib,
leafblkno;
Buffer sleafbuf;
itemid = PageGetItemId(page, P_HIKEY);
@ -1777,6 +1778,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
#ifdef USE_ASSERT_CHECKING
/*
* This is just an assertion because _bt_lock_subtree_parent should have
* guaranteed tuple has the expected contents
@ -2368,7 +2370,8 @@ _bt_lock_subtree_parent(Relation rel, BlockNumber child, BTStack stack,
Buffer *subtreeparent, OffsetNumber *poffset,
BlockNumber *topparent, BlockNumber *topparentrightsib)
{
BlockNumber parent, leftsibparent;
BlockNumber parent,
leftsibparent;
OffsetNumber parentoffset,
maxoff;
Buffer pbuf;
@ -2439,9 +2442,9 @@ _bt_lock_subtree_parent(Relation rel, BlockNumber child, BTStack stack,
/*
* Now make sure that the parent deletion is itself safe by examining the
* child's grandparent page. Recurse, passing the parent page as the
* child page (child's grandparent is the parent on the next level up).
* If parent deletion is unsafe, then child deletion must also be unsafe
* (in which case caller cannot delete any pages at all).
* child page (child's grandparent is the parent on the next level up). If
* parent deletion is unsafe, then child deletion must also be unsafe (in
* which case caller cannot delete any pages at all).
*/
*topparent = parent;
*topparentrightsib = opaque->btpo_next;

View File

@ -1091,7 +1091,8 @@ btvacuumpage(BTVacState *vstate, BlockNumber scanblkno)
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool attempt_pagedel;
BlockNumber blkno, backtrack_to;
BlockNumber blkno,
backtrack_to;
Buffer buf;
Page page;
BTPageOpaque opaque;

View File

@ -156,11 +156,10 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
/*
* We need to save the location of the pivot tuple we chose in the
* parent page on a stack. If we need to split a page, we'll use
* the stack to work back up to its parent page. If caller ends up
* splitting a page one level down, it usually ends up inserting a
* new pivot tuple/downlink immediately after the location recorded
* here.
* parent page on a stack. If we need to split a page, we'll use the
* stack to work back up to its parent page. If caller ends up
* splitting a page one level down, it usually ends up inserting a new
* pivot tuple/downlink immediately after the location recorded here.
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;

View File

@ -6341,7 +6341,11 @@ StartupXLOG(void)
switch (ControlFile->state)
{
case DB_SHUTDOWNED:
/* This is the expected case, so don't be chatty in standalone mode */
/*
* This is the expected case, so don't be chatty in standalone
* mode
*/
ereport(IsPostmasterEnvironment ? LOG : NOTICE,
(errmsg("database system was shut down at %s",
str_time(ControlFile->time))));
@ -10691,8 +10695,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
datadirpathlen = strlen(DataDir);
/*
* Report that we are now estimating the total backup size
* if we're streaming base backup as requested by pg_basebackup
* Report that we are now estimating the total backup size if we're
* streaming base backup as requested by pg_basebackup
*/
if (tablespaces)
pgstat_progress_update_param(PROGRESS_BASEBACKUP_PHASE,
@ -12154,8 +12158,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
*/
/*
* We should be able to move to XLOG_FROM_STREAM
* only in standby mode.
* We should be able to move to XLOG_FROM_STREAM only in
* standby mode.
*/
Assert(StandbyMode);
@ -12242,6 +12246,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
{
case XLOG_FROM_ARCHIVE:
case XLOG_FROM_PG_WAL:
/*
* WAL receiver must not be running when reading WAL from
* archive or pg_wal.
@ -12279,8 +12284,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
bool havedata;
/*
* We should be able to move to XLOG_FROM_STREAM
* only in standby mode.
* We should be able to move to XLOG_FROM_STREAM only in
* standby mode.
*/
Assert(StandbyMode);

View File

@ -64,8 +64,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
TimeLineID restartTli;
/*
* Ignore restore_command when not in archive recovery (meaning
* we are in crash recovery).
* Ignore restore_command when not in archive recovery (meaning we are in
* crash recovery).
*/
if (!ArchiveRecoveryRequested)
goto not_available;

View File

@ -1610,8 +1610,8 @@ XLogRecGetFullXid(XLogReaderState *record)
epoch = EpochFromFullTransactionId(ShmemVariableCache->nextFullXid);
/*
* If xid is numerically greater than next_xid, it has to be from the
* last epoch.
* If xid is numerically greater than next_xid, it has to be from the last
* epoch.
*/
if (unlikely(xid > next_xid))
--epoch;

View File

@ -3487,9 +3487,9 @@ restart:
*
* Because of this arrangement, we can correctly catch all
* relevant relations by adding to 'parent_cons' all rows with
* valid conparentid, and to the 'oids' list all rows with a
* zero conparentid. If any oids are added to 'oids', redo the
* first loop above by setting 'restart'.
* valid conparentid, and to the 'oids' list all rows with a zero
* conparentid. If any oids are added to 'oids', redo the first
* loop above by setting 'restart'.
*/
if (OidIsValid(con->conparentid))
parent_cons = list_append_unique_oid(parent_cons,

View File

@ -1324,6 +1324,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
sdepForm->objid);
break;
case SHARED_DEPENDENCY_POLICY:
/*
* Try to remove role from policy; if unable to, remove
* policy.
@ -1335,6 +1336,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
obj.classId = sdepForm->classid;
obj.objectId = sdepForm->objid;
obj.objectSubId = sdepForm->objsubid;
/*
* Acquire lock on object, then verify this dependency
* is still relevant. If not, the object might have

View File

@ -367,9 +367,9 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
smgrtruncate(rel->rd_smgr, forks, nforks, blocks);
/*
* Update upper-level FSM pages to account for the truncation.
* This is important because the just-truncated pages were likely
* marked as all-free, and would be preferentially selected.
* Update upper-level FSM pages to account for the truncation. This is
* important because the just-truncated pages were likely marked as
* all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
@ -995,9 +995,9 @@ smgr_redo(XLogReaderState *record)
smgrtruncate(reln, forks, nforks, blocks);
/*
* Update upper-level FSM pages to account for the truncation.
* This is important because the just-truncated pages were likely
* marked as all-free, and would be preferentially selected.
* Update upper-level FSM pages to account for the truncation. This is
* important because the just-truncated pages were likely marked as
* all-free, and would be preferentially selected.
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, xlrec->blkno,

View File

@ -72,12 +72,6 @@ typedef struct EventTriggerQueryState
static EventTriggerQueryState *currentEventTriggerState = NULL;
typedef struct
{
const char *obtypename;
bool supported;
} event_trigger_support_data;
/* Support for dropped objects */
typedef struct SQLDropObject
{

View File

@ -2886,8 +2886,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
* we don't need to do anything if there were 0 full groups.
*
* We still have to continue after this block if there are no full groups,
* though, since it's possible that we have workers that did real work even
* if the leader didn't participate.
* though, since it's possible that we have workers that did real work
* even if the leader didn't participate.
*/
if (fullsortGroupInfo->groupCount > 0)
{
@ -2914,8 +2914,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
&incrsortstate->shared_info->sinfo[n];
/*
* If a worker hasn't processed any sort groups at all, then exclude
* it from output since it either didn't launch or didn't
* If a worker hasn't processed any sort groups at all, then
* exclude it from output since it either didn't launch or didn't
* contribute anything meaningful.
*/
fullsortGroupInfo = &incsort_info->fullsortGroupInfo;
@ -2923,8 +2923,8 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
/*
* Since we never have any prefix groups unless we've first sorted
* a full groups and transitioned modes (copying the tuples into a
* prefix group), we don't need to do anything if there were 0 full
* groups.
* prefix group), we don't need to do anything if there were 0
* full groups.
*/
if (fullsortGroupInfo->groupCount == 0)
continue;

View File

@ -1252,6 +1252,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid,
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("btree equal image functions must return boolean")));
/*
* pg_amproc functions are indexed by (lefttype, righttype), but
* an equalimage function can only be called at CREATE INDEX time.

View File

@ -455,9 +455,9 @@ AlterStatistics(AlterStatsStmt *stmt)
stxoid = get_statistics_object_oid(stmt->defnames, stmt->missing_ok);
/*
* If we got here and the OID is not valid, it means the statistics
* does not exist, but the command specified IF EXISTS. So report
* this as a simple NOTICE and we're done.
* If we got here and the OID is not valid, it means the statistics does
* not exist, but the command specified IF EXISTS. So report this as a
* simple NOTICE and we're done.
*/
if (!OidIsValid(stxoid))
{

View File

@ -1265,9 +1265,9 @@ RemoveRelations(DropStmt *drop)
if (drop->concurrent)
{
/*
* Note that for temporary relations this lock may get upgraded
* later on, but as no other session can access a temporary
* relation, this is actually fine.
* Note that for temporary relations this lock may get upgraded later
* on, but as no other session can access a temporary relation, this
* is actually fine.
*/
lockmode = ShareUpdateExclusiveLock;
Assert(drop->removeType == OBJECT_INDEX);
@ -1620,10 +1620,10 @@ ExecuteTruncate(TruncateStmt *stmt)
}
/*
* Inherited TRUNCATE commands perform access
* permission checks on the parent table only.
* So we skip checking the children's permissions
* and don't call truncate_check_perms() here.
* Inherited TRUNCATE commands perform access permission
* checks on the parent table only. So we skip checking the
* children's permissions and don't call
* truncate_check_perms() here.
*/
truncate_check_rel(RelationGetRelid(rel), rel->rd_rel);
truncate_check_activity(rel);
@ -2650,6 +2650,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
errmsg("column \"%s\" inherits from generated column but specifies identity",
def->colname)));
}
/*
* If the parent column is not generated, then take whatever
* the child column definition says.

View File

@ -2240,8 +2240,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
/*
* After a tuple in a partition goes through a trigger, the user
* could have changed the partition key enough that the tuple
* no longer fits the partition. Verify that.
* could have changed the partition key enough that the tuple no
* longer fits the partition. Verify that.
*/
if (trigger->tgisclone &&
!ExecPartitionCheck(relinfo, slot, estate, false))

View File

@ -167,15 +167,16 @@ static Datum ExecJustAssignOuterVarVirt(ExprState *state, ExprContext *econtext,
static Datum ExecJustAssignScanVarVirt(ExprState *state, ExprContext *econtext, bool *isnull);
/* execution helper functions */
static pg_attribute_always_inline void
ExecAggPlainTransByVal(AggState *aggstate, AggStatePerTrans pertrans,
static pg_attribute_always_inline void ExecAggPlainTransByVal(AggState *aggstate,
AggStatePerTrans pertrans,
AggStatePerGroup pergroup,
ExprContext *aggcontext, int setno);
static pg_attribute_always_inline void
ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
ExprContext *aggcontext,
int setno);
static pg_attribute_always_inline void ExecAggPlainTransByRef(AggState *aggstate,
AggStatePerTrans pertrans,
AggStatePerGroup pergroup,
ExprContext *aggcontext, int setno);
ExprContext *aggcontext,
int setno);
/*
* Prepare ExprState for interpreted execution.
@ -1611,8 +1612,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
EEO_CASE(EEOP_AGG_PLAIN_PERGROUP_NULLCHECK)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerGroup pergroup_allaggs = aggstate->all_pergroups
[op->d.agg_plain_pergroup_nullcheck.setoff];
AggStatePerGroup pergroup_allaggs =
aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
@ -1636,9 +1637,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@ -1665,9 +1665,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@ -1684,9 +1683,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
@ -1702,9 +1700,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@ -1724,9 +1721,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@ -1742,9 +1738,8 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup = &aggstate->all_pergroups
[op->d.agg_trans.setoff]
[op->d.agg_trans.transno];
AggStatePerGroup pergroup =
&aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
@ -4302,21 +4297,20 @@ ExecAggPlainTransByRef(AggState *aggstate, AggStatePerTrans pertrans,
newVal = FunctionCallInvoke(fcinfo);
/*
* For pass-by-ref datatype, must copy the new value into
* aggcontext and free the prior transValue. But if transfn
* returned a pointer to its first input, we don't need to do
* anything. Also, if transfn returned a pointer to a R/W
* expanded object that is already a child of the aggcontext,
* assume we can adopt that value without copying it.
* For pass-by-ref datatype, must copy the new value into aggcontext and
* free the prior transValue. But if transfn returned a pointer to its
* first input, we don't need to do anything. Also, if transfn returned a
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
* It's safe to compare newVal with pergroup->transValue without
* regard for either being NULL, because ExecAggTransReparent()
* takes care to set transValue to 0 when NULL. Otherwise we could
* end up accidentally not reparenting, when the transValue has
* the same numerical value as newValue, despite being NULL. This
* is a somewhat hot path, making it undesirable to instead solve
* this with another branch for the common case of the transition
* function returning its (modified) input argument.
* It's safe to compare newVal with pergroup->transValue without regard
* for either being NULL, because ExecAggTransReparent() takes care to set
* transValue to 0 when NULL. Otherwise we could end up accidentally not
* reparenting, when the transValue has the same numerical value as
* newValue, despite being NULL. This is a somewhat hot path, making it
* undesirable to instead solve this with another branch for the common
* case of the transition function returning its (modified) input
* argument.
*/
if (DatumGetPointer(newVal) != DatumGetPointer(pergroup->transValue))
newVal = ExecAggTransReparent(aggstate, pertrans,

View File

@ -786,14 +786,14 @@ advance_transition_function(AggState *aggstate,
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
* It's safe to compare newVal with pergroup->transValue without
* regard for either being NULL, because ExecAggTransReparent()
* takes care to set transValue to 0 when NULL. Otherwise we could
* end up accidentally not reparenting, when the transValue has
* the same numerical value as newValue, despite being NULL. This
* is a somewhat hot path, making it undesirable to instead solve
* this with another branch for the common case of the transition
* function returning its (modified) input argument.
* It's safe to compare newVal with pergroup->transValue without regard
* for either being NULL, because ExecAggTransReparent() takes care to set
* transValue to 0 when NULL. Otherwise we could end up accidentally not
* reparenting, when the transValue has the same numerical value as
* newValue, despite being NULL. This is a somewhat hot path, making it
* undesirable to instead solve this with another branch for the common
* case of the transition function returning its (modified) input
* argument.
*/
if (!pertrans->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
@ -1457,8 +1457,9 @@ build_hash_tables(AggState *aggstate)
memory = aggstate->hash_mem_limit / aggstate->num_hashes;
/* choose reasonable number of buckets per hashtable */
nbuckets = hash_choose_num_buckets(
aggstate->hashentrysize, perhash->aggnode->numGroups, memory);
nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
perhash->aggnode->numGroups,
memory);
build_hash_table(aggstate, setno, nbuckets);
}
@ -1489,8 +1490,7 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
*/
additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
perhash->hashtable = BuildTupleHashTableExt(
&aggstate->ss.ps,
perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
perhash->hashslot->tts_tupleDescriptor,
perhash->numCols,
perhash->hashGrpColIdxHash,
@ -1723,8 +1723,9 @@ hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
aggstate->ss.ps.outeropsfixed = true;
}
phase->evaltrans_cache[i][j] = ExecBuildAggTrans(
aggstate, phase, dosort, dohash, nullcheck);
phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
dosort, dohash,
nullcheck);
/* change back */
aggstate->ss.ps.outerops = outerops;
@ -1762,9 +1763,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
/*
* Calculate expected memory requirements for spilling, which is the size
* of the buffers needed for all the tapes that need to be open at
* once. Then, subtract that from the memory available for holding hash
* tables.
* of the buffers needed for all the tapes that need to be open at once.
* Then, subtract that from the memory available for holding hash tables.
*/
npartitions = hash_choose_num_partitions(input_groups,
hashentrysize,
@ -1804,10 +1804,10 @@ static void
hash_agg_check_limits(AggState *aggstate)
{
uint64 ngroups = aggstate->hash_ngroups_current;
Size meta_mem = MemoryContextMemAllocated(
aggstate->hash_metacxt, true);
Size hash_mem = MemoryContextMemAllocated(
aggstate->hashcontext->ecxt_per_tuple_memory, true);
Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
true);
Size hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
true);
/*
* Don't spill unless there's at least one group in the hash table so we
@ -1841,8 +1841,7 @@ hash_agg_enter_spill_mode(AggState *aggstate)
hashagg_tapeinfo_init(aggstate);
aggstate->hash_spills = palloc(
sizeof(HashAggSpill) * aggstate->num_hashes);
aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
for (int setno = 0; setno < aggstate->num_hashes; setno++)
{
@ -1878,8 +1877,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
/* memory for the group keys and transition states */
hash_mem = MemoryContextMemAllocated(
aggstate->hashcontext->ecxt_per_tuple_memory, true);
hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
/* memory for read/write tape buffers, if spilled */
buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
@ -1894,8 +1892,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
/* update disk usage */
if (aggstate->hash_tapeinfo != NULL)
{
uint64 disk_used = LogicalTapeSetBlocks(
aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
uint64 disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
if (aggstate->hash_disk_used < disk_used)
aggstate->hash_disk_used = disk_used;
@ -2623,7 +2620,8 @@ agg_refill_hash_table(AggState *aggstate)
LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum,
HASHAGG_READ_BUFFER_SIZE);
for (;;) {
for (;;)
{
TupleTableSlot *slot = aggstate->hash_spill_slot;
MinimalTuple tuple;
uint32 hash;
@ -2639,8 +2637,8 @@ agg_refill_hash_table(AggState *aggstate)
aggstate->tmpcontext->ecxt_outertuple = slot;
prepare_hash_slot(aggstate);
aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(
aggstate, hash, &in_hash_table);
aggstate->hash_pergroup[batch->setno] =
lookup_hash_entry(aggstate, hash, &in_hash_table);
if (in_hash_table)
{
@ -2899,8 +2897,8 @@ hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum)
if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes)
{
tapeinfo->freetapes_alloc <<= 1;
tapeinfo->freetapes = repalloc(
tapeinfo->freetapes, tapeinfo->freetapes_alloc * sizeof(int));
tapeinfo->freetapes = repalloc(tapeinfo->freetapes,
tapeinfo->freetapes_alloc * sizeof(int));
}
tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
}
@ -2918,8 +2916,8 @@ hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
int npartitions;
int partition_bits;
npartitions = hash_choose_num_partitions(
input_groups, hashentrysize, used_bits, &partition_bits);
npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
used_bits, &partition_bits);
spill->partitions = palloc0(sizeof(int) * npartitions);
spill->ntuples = palloc0(sizeof(int64) * npartitions);
@ -3056,6 +3054,7 @@ hashagg_finish_initial_spills(AggState *aggstate)
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
total_npartitions += spill->npartitions;
hashagg_spill_finish(aggstate, spill, setno);
}
@ -3123,6 +3122,7 @@ hashagg_reset_spill_state(AggState *aggstate)
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
pfree(spill->ntuples);
pfree(spill->partitions);
}
@ -3134,6 +3134,7 @@ hashagg_reset_spill_state(AggState *aggstate)
foreach(lc, aggstate->hash_batches)
{
HashAggBatch *batch = (HashAggBatch *) lfirst(lc);
pfree(batch);
}
list_free(aggstate->hash_batches);
@ -3562,18 +3563,18 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
uint64 totalGroups = 0;
int i;
aggstate->hash_metacxt = AllocSetContextCreate(
aggstate->ss.ps.state->es_query_cxt,
aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
"HashAgg meta context",
ALLOCSET_DEFAULT_SIZES);
aggstate->hash_spill_slot = ExecInitExtraTupleSlot(
estate, scanDesc, &TTSOpsMinimalTuple);
aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc,
&TTSOpsMinimalTuple);
/* this is an array of pointers, not structures */
aggstate->hash_pergroup = pergroups;
aggstate->hashentrysize = hash_agg_entry_size(
aggstate->numtrans, outerplan->plan_width, node->transitionSpace);
aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
outerplan->plan_width,
node->transitionSpace);
/*
* Consider all of the grouping sets together when setting the limits

View File

@ -791,8 +791,8 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
ExecInitQual(node->bitmapqualorig, (PlanState *) scanstate);
/*
* Maximum number of prefetches for the tablespace if configured, otherwise
* the current value of the effective_io_concurrency GUC.
* Maximum number of prefetches for the tablespace if configured,
* otherwise the current value of the effective_io_concurrency GUC.
*/
scanstate->prefetch_maximum =
get_tablespace_io_concurrency(currentRelation->rd_rel->reltablespace);

View File

@ -97,17 +97,24 @@
* - groupName: the token fullsort or prefixsort
*/
#define INSTRUMENT_SORT_GROUP(node, groupName) \
if (node->ss.ps.instrument != NULL) \
do { \
if ((node)->ss.ps.instrument != NULL) \
{ \
if (node->shared_info && node->am_worker) \
if ((node)->shared_info && (node)->am_worker) \
{ \
Assert(IsParallelWorker()); \
Assert(ParallelWorkerNumber <= node->shared_info->num_workers); \
instrumentSortedGroup(&node->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, node->groupName##_state); \
} else { \
instrumentSortedGroup(&node->incsort_info.groupName##GroupInfo, node->groupName##_state); \
Assert(ParallelWorkerNumber <= (node)->shared_info->num_workers); \
instrumentSortedGroup(&(node)->shared_info->sinfo[ParallelWorkerNumber].groupName##GroupInfo, \
(node)->groupName##_state); \
} \
}
else \
{ \
instrumentSortedGroup(&(node)->incsort_info.groupName##GroupInfo, \
(node)->groupName##_state); \
} \
} \
} while (0)
/* ----------------------------------------------------------------
* instrumentSortedGroup
@ -122,6 +129,7 @@ instrumentSortedGroup(IncrementalSortGroupInfo *groupInfo,
Tuplesortstate *sortState)
{
TuplesortInstrumentation sort_instr;
groupInfo->groupCount++;
tuplesort_get_stats(sortState, &sort_instr);
@ -444,7 +452,7 @@ switchToPresortedPrefixMode(PlanState *pstate)
SO1_printf("Sorting presorted prefix tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
INSTRUMENT_SORT_GROUP(node, prefixsort)
INSTRUMENT_SORT_GROUP(node, prefixsort);
if (node->bounded)
{
@ -702,7 +710,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting fullsort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
INSTRUMENT_SORT_GROUP(node, fullsort)
INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (final tuple)\n");
node->execution_status = INCSORT_READFULLSORT;
@ -783,7 +791,7 @@ ExecIncrementalSort(PlanState *pstate)
nTuples);
tuplesort_performsort(fullsort_state);
INSTRUMENT_SORT_GROUP(node, fullsort)
INSTRUMENT_SORT_GROUP(node, fullsort);
SO_printf("Setting execution_status to INCSORT_READFULLSORT (found end of group)\n");
node->execution_status = INCSORT_READFULLSORT;
@ -792,8 +800,8 @@ ExecIncrementalSort(PlanState *pstate)
}
/*
* Unless we've already transitioned modes to reading from the full
* sort state, then we assume that having read at least
* Unless we've already transitioned modes to reading from the
* full sort state, then we assume that having read at least
* DEFAULT_MAX_FULL_SORT_GROUP_SIZE tuples means it's likely we're
* processing a large group of tuples all having equal prefix keys
* (but haven't yet found the final tuple in that prefix key
@ -823,7 +831,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting fullsort tuplesort with %ld tuples\n", nTuples);
tuplesort_performsort(fullsort_state);
INSTRUMENT_SORT_GROUP(node, fullsort)
INSTRUMENT_SORT_GROUP(node, fullsort);
/*
* If the full sort tuplesort happened to switch into top-n
@ -849,8 +857,9 @@ ExecIncrementalSort(PlanState *pstate)
/*
* We might have multiple prefix key groups in the full sort
* state, so the mode transition function needs to know that it
* needs to move from the fullsort to presorted prefix sort.
* state, so the mode transition function needs to know that
* it needs to move from the fullsort to presorted prefix
* sort.
*/
node->n_fullsort_remaining = nTuples;
@ -936,7 +945,7 @@ ExecIncrementalSort(PlanState *pstate)
SO1_printf("Sorting presorted prefix tuplesort with >= %ld tuples\n", nTuples);
tuplesort_performsort(node->prefixsort_state);
INSTRUMENT_SORT_GROUP(node, prefixsort)
INSTRUMENT_SORT_GROUP(node, prefixsort);
SO_printf("Setting execution_status to INCSORT_READPREFIXSORT (found end of group)\n");
node->execution_status = INCSORT_READPREFIXSORT;
@ -986,9 +995,9 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
SO_printf("ExecInitIncrementalSort: initializing sort node\n");
/*
* Incremental sort can't be used with EXEC_FLAG_BACKWARD or EXEC_FLAG_MARK,
* because the current sort state contains only one sort batch rather than
* the full result set.
* Incremental sort can't be used with EXEC_FLAG_BACKWARD or
* EXEC_FLAG_MARK, because the current sort state contains only one sort
* batch rather than the full result set.
*/
Assert((eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)) == 0);
@ -1041,8 +1050,8 @@ ExecInitIncrementalSort(IncrementalSort *node, EState *estate, int eflags)
* Initialize child nodes.
*
* Incremental sort does not support backwards scans and mark/restore, so
* we don't bother removing the flags from eflags here. We allow passing
* a REWIND flag, because although incremental sort can't use it, the child
* we don't bother removing the flags from eflags here. We allow passing a
* REWIND flag, because although incremental sort can't use it, the child
* nodes may be able to do something more useful.
*/
outerPlanState(incrsortstate) = ExecInitNode(outerPlan(node), estate, eflags);
@ -1128,10 +1137,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
* re-execute the sort along with the child node. Incremental sort itself
* can't do anything smarter, but maybe the child nodes can.
*
* In theory if we've only filled the full sort with one batch (and haven't
* reset it for a new batch yet) then we could efficiently rewind, but
* that seems a narrow enough case that it's not worth handling specially
* at this time.
* In theory if we've only filled the full sort with one batch (and
* haven't reset it for a new batch yet) then we could efficiently rewind,
* but that seems a narrow enough case that it's not worth handling
* specially at this time.
*/
/* must drop pointer to sort result tuple */
@ -1152,10 +1161,10 @@ ExecReScanIncrementalSort(IncrementalSortState *node)
/*
* If we've set up either of the sort states yet, we need to reset them.
* We could end them and null out the pointers, but there's no reason to
* repay the setup cost, and because ExecIncrementalSort guards
* presorted column functions by checking to see if the full sort state
* has been initialized yet, setting the sort states to null here might
* actually cause a leak.
* repay the setup cost, and because ExecIncrementalSort guards presorted
* column functions by checking to see if the full sort state has been
* initialized yet, setting the sort states to null here might actually
* cause a leak.
*/
if (node->fullsort_state != NULL)
{

View File

@ -2060,25 +2060,20 @@ llvm_compile_expr(ExprState *state)
* pergroup_allaggs = aggstate->all_pergroups
* [op->d.agg_plain_pergroup_nullcheck.setoff];
*/
v_aggstatep = LLVMBuildBitCast(
b, v_parent, l_ptr(StructAggState), "");
v_aggstatep = LLVMBuildBitCast(b, v_parent,
l_ptr(StructAggState), "");
v_allpergroupsp = l_load_struct_gep(
b, v_aggstatep,
v_allpergroupsp = l_load_struct_gep(b, v_aggstatep,
FIELDNO_AGGSTATE_ALL_PERGROUPS,
"aggstate.all_pergroups");
v_setoff = l_int32_const(
op->d.agg_plain_pergroup_nullcheck.setoff);
v_setoff = l_int32_const(op->d.agg_plain_pergroup_nullcheck.setoff);
v_pergroup_allaggs = l_load_gep1(
b, v_allpergroupsp, v_setoff, "");
v_pergroup_allaggs = l_load_gep1(b, v_allpergroupsp, v_setoff, "");
LLVMBuildCondBr(
b,
LLVMBuildCondBr(b,
LLVMBuildICmp(b, LLVMIntEQ,
LLVMBuildPtrToInt(
b, v_pergroup_allaggs, TypeSizeT, ""),
LLVMBuildPtrToInt(b, v_pergroup_allaggs, TypeSizeT, ""),
l_sizet_const(0), ""),
opblocks[jumpnull],
opblocks[opno + 1]);

View File

@ -293,8 +293,8 @@ pg_be_scram_init(Port *port,
}
/*
* If the user did not have a valid SCRAM secret, we still go through
* the motions with a mock one, and fail as if the client supplied an
* If the user did not have a valid SCRAM secret, we still go through the
* motions with a mock one, and fail as if the client supplied an
* incorrect password. This is to avoid revealing information to an
* attacker.
*/

View File

@ -1341,6 +1341,7 @@ default_openssl_tls_init(SSL_CTX *context, bool isServerStart)
if (ssl_passphrase_command[0] && ssl_passphrase_command_supports_reload)
SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb);
else
/*
* If reloading and no external command is configured, override
* OpenSSL's default handling of passphrase-protected files,

View File

@ -2751,8 +2751,9 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
List *useful_pathkeys_list = NIL;
/*
* Considering query_pathkeys is always worth it, because it might allow us
* to avoid a total sort when we have a partially presorted path available.
* Considering query_pathkeys is always worth it, because it might allow
* us to avoid a total sort when we have a partially presorted path
* available.
*/
if (root->query_pathkeys)
{
@ -2765,15 +2766,15 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
EquivalenceClass *pathkey_ec = pathkey->pk_eclass;
/*
* We can only build an Incremental Sort for pathkeys which contain
* an EC member in the current relation, so ignore any suffix of the
* list as soon as we find a pathkey without an EC member the
* relation.
* We can only build an Incremental Sort for pathkeys which
* contain an EC member in the current relation, so ignore any
* suffix of the list as soon as we find a pathkey without an EC
* member the relation.
*
* By still returning the prefix of the pathkeys list that does meet
* criteria of EC membership in the current relation, we enable not
* just an incremental sort on the entirety of query_pathkeys but
* also incremental sort below a JOIN.
* By still returning the prefix of the pathkeys list that does
* meet criteria of EC membership in the current relation, we
* enable not just an incremental sort on the entirety of
* query_pathkeys but also incremental sort below a JOIN.
*/
if (!find_em_expr_for_rel(pathkey_ec, rel))
break;
@ -2782,9 +2783,9 @@ get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel)
}
/*
* The whole query_pathkeys list matches, so append it directly, to allow
* comparing pathkeys easily by comparing list pointer. If we have to truncate
* the pathkeys, we gotta do a copy though.
* The whole query_pathkeys list matches, so append it directly, to
* allow comparing pathkeys easily by comparing list pointer. If we
* have to truncate the pathkeys, we gotta do a copy though.
*/
if (npathkeys == list_length(root->query_pathkeys))
useful_pathkeys_list = lappend(useful_pathkeys_list,
@ -2851,7 +2852,8 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
/*
* If the path has no ordering at all, then we can't use either
* incremental sort or rely on implict sorting with a gather merge.
* incremental sort or rely on implict sorting with a gather
* merge.
*/
if (subpath->pathkeys == NIL)
continue;
@ -2915,8 +2917,9 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
Path *tmp;
/*
* We should have already excluded pathkeys of length 1 because
* then presorted_keys > 0 would imply is_sorted was true.
* We should have already excluded pathkeys of length 1
* because then presorted_keys > 0 would imply is_sorted was
* true.
*/
Assert(list_length(useful_pathkeys) != 1);

View File

@ -1821,19 +1821,19 @@ cost_incremental_sort(Path *path,
/*
* Extract presorted keys as list of expressions.
*
* We need to be careful about Vars containing "varno 0" which might
* have been introduced by generate_append_tlist, which would confuse
* We need to be careful about Vars containing "varno 0" which might have
* been introduced by generate_append_tlist, which would confuse
* estimate_num_groups (in fact it'd fail for such expressions). See
* recurse_set_operations which has to deal with the same issue.
*
* Unlike recurse_set_operations we can't access the original target
* list here, and even if we could it's not very clear how useful would
* that be for a set operation combining multiple tables. So we simply
* detect if there are any expressions with "varno 0" and use the
* default DEFAULT_NUM_DISTINCT in that case.
* Unlike recurse_set_operations we can't access the original target list
* here, and even if we could it's not very clear how useful would that be
* for a set operation combining multiple tables. So we simply detect if
* there are any expressions with "varno 0" and use the default
* DEFAULT_NUM_DISTINCT in that case.
*
* We might also use either 1.0 (a single group) or input_tuples (each
* row being a separate group), pretty much the worst and best case for
* We might also use either 1.0 (a single group) or input_tuples (each row
* being a separate group), pretty much the worst and best case for
* incremental sort. But those are extreme cases and using something in
* between seems reasonable. Furthermore, generate_append_tlist is used
* for set operations, which are likely to produce mostly unique output
@ -2403,15 +2403,15 @@ cost_agg(Path *path, PlannerInfo *root,
/*
* Add the disk costs of hash aggregation that spills to disk.
*
* Groups that go into the hash table stay in memory until finalized,
* so spilling and reprocessing tuples doesn't incur additional
* invocations of transCost or finalCost. Furthermore, the computed
* hash value is stored with the spilled tuples, so we don't incur
* extra invocations of the hash function.
* Groups that go into the hash table stay in memory until finalized, so
* spilling and reprocessing tuples doesn't incur additional invocations
* of transCost or finalCost. Furthermore, the computed hash value is
* stored with the spilled tuples, so we don't incur extra invocations of
* the hash function.
*
* Hash Agg begins returning tuples after the first batch is
* complete. Accrue writes (spilled tuples) to startup_cost and to
* total_cost; accrue reads only to total_cost.
* Hash Agg begins returning tuples after the first batch is complete.
* Accrue writes (spilled tuples) to startup_cost and to total_cost;
* accrue reads only to total_cost.
*/
if (aggstrategy == AGG_HASHED || aggstrategy == AGG_MIXED)
{
@ -2430,8 +2430,8 @@ cost_agg(Path *path, PlannerInfo *root,
* than or equal to one, all groups are expected to fit in memory;
* otherwise we expect to spill.
*/
hashentrysize = hash_agg_entry_size(
aggcosts->numAggs, input_width, aggcosts->transitionSpace);
hashentrysize = hash_agg_entry_size(aggcosts->numAggs, input_width,
aggcosts->transitionSpace);
hash_agg_set_limits(hashentrysize, numGroups, 0, &mem_limit,
&ngroups_limit, &num_partitions);

View File

@ -1378,8 +1378,8 @@ try_partitionwise_join(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
Assert(joinrel->consider_partitionwise_join);
/*
* We can not perform partitionwise join if either of the joining relations
* is not partitioned.
* We can not perform partitionwise join if either of the joining
* relations is not partitioned.
*/
if (!IS_PARTITIONED_REL(rel1) || !IS_PARTITIONED_REL(rel2))
return;
@ -1622,8 +1622,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
* partition bounds as inputs, and the partitions with the same
* cardinal positions form the pairs.
*
* Note: even in cases where one or both inputs have merged bounds,
* it would be possible for both the bounds to be exactly the same, but
* Note: even in cases where one or both inputs have merged bounds, it
* would be possible for both the bounds to be exactly the same, but
* it seems unlikely to be worth the cycles to check.
*/
if (!rel1->partbounds_merged &&
@ -1670,8 +1670,8 @@ compute_partition_bounds(PlannerInfo *root, RelOptInfo *rel1,
/*
* If the join rel's partbounds_merged flag is true, it means inputs
* are not guaranteed to have the same partition bounds, therefore we
* can't assume that the partitions at the same cardinal positions form
* the pairs; let get_matching_part_pairs() generate the pairs.
* can't assume that the partitions at the same cardinal positions
* form the pairs; let get_matching_part_pairs() generate the pairs.
* Otherwise, nothing to do since we can assume that.
*/
if (joinrel->partbounds_merged)
@ -1735,9 +1735,10 @@ get_matching_part_pairs(PlannerInfo *root, RelOptInfo *joinrel,
* Get a child rel for rel1 with the relids. Note that we should have
* the child rel even if rel1 is a join rel, because in that case the
* partitions specified in the relids would have matching/overlapping
* boundaries, so the specified partitions should be considered as ones
* to be joined when planning partitionwise joins of rel1, meaning that
* the child rel would have been built by the time we get here.
* boundaries, so the specified partitions should be considered as
* ones to be joined when planning partitionwise joins of rel1,
* meaning that the child rel would have been built by the time we get
* here.
*/
if (rel1_is_simple)
{

View File

@ -4866,8 +4866,7 @@ create_distinct_paths(PlannerInfo *root,
allow_hash = false; /* policy-based decision not to hash */
else
{
Size hashentrysize = hash_agg_entry_size(
0, cheapest_input_path->pathtarget->width, 0);
Size hashentrysize = hash_agg_entry_size(0, cheapest_input_path->pathtarget->width, 0);
allow_hash = enable_hashagg_disk ||
(hashentrysize * numDistinctRows <= work_mem * 1024L);
@ -4986,9 +4985,9 @@ create_ordered_paths(PlannerInfo *root,
else
{
/*
* Try adding an explicit sort, but only to the cheapest total path
* since a full sort should generally add the same cost to all
* paths.
* Try adding an explicit sort, but only to the cheapest total
* path since a full sort should generally add the same cost to
* all paths.
*/
if (input_path == cheapest_input_path)
{
@ -5010,11 +5009,11 @@ create_ordered_paths(PlannerInfo *root,
}
/*
* If incremental sort is enabled, then try it as well. Unlike with
* regular sorts, we can't just look at the cheapest path, because
* the cost of incremental sort depends on how well presorted the
* path is. Additionally incremental sort may enable a cheaper
* startup path to win out despite higher total cost.
* If incremental sort is enabled, then try it as well. Unlike
* with regular sorts, we can't just look at the cheapest path,
* because the cost of incremental sort depends on how well
* presorted the path is. Additionally incremental sort may enable
* a cheaper startup path to win out despite higher total cost.
*/
if (!enable_incrementalsort)
continue;
@ -5110,10 +5109,10 @@ create_ordered_paths(PlannerInfo *root,
double total_groups;
/*
* We don't care if this is the cheapest partial path - we can't
* simply skip it, because it may be partially sorted in which
* case we want to consider adding incremental sort (instead of
* full sort, which is what happens above).
* We don't care if this is the cheapest partial path - we
* can't simply skip it, because it may be partially sorted in
* which case we want to consider adding incremental sort
* (instead of full sort, which is what happens above).
*/
is_sorted = pathkeys_count_contained_in(root->sort_pathkeys,
@ -6607,8 +6606,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->hasAggs)
{
/*
* We have aggregation, possibly with plain GROUP BY. Make
* an AggPath.
* We have aggregation, possibly with plain GROUP BY. Make an
* AggPath.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root,
@ -6625,8 +6624,8 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
else if (parse->groupClause)
{
/*
* We have GROUP BY without aggregation or grouping sets.
* Make a GroupPath.
* We have GROUP BY without aggregation or grouping sets. Make
* a GroupPath.
*/
add_path(grouped_rel, (Path *)
create_group_path(root,
@ -6712,8 +6711,9 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
continue;
/*
* We should have already excluded pathkeys of length 1 because
* then presorted_keys > 0 would imply is_sorted was true.
* We should have already excluded pathkeys of length 1
* because then presorted_keys > 0 would imply is_sorted was
* true.
*/
Assert(list_length(root->group_pathkeys) != 1);
@ -7301,8 +7301,8 @@ gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
* Consider incremental sort on all partial paths, if enabled.
*
* We can also skip the entire loop when we only have a single-item
* group_pathkeys because then we can't possibly have a presorted
* prefix of the list without having the list be fully sorted.
* group_pathkeys because then we can't possibly have a presorted prefix
* of the list without having the list be fully sorted.
*/
if (!enable_incrementalsort || list_length(root->group_pathkeys) == 1)
return;

View File

@ -955,8 +955,8 @@ partition_bounds_copy(PartitionBoundInfo src,
dest->kind = NULL;
/*
* For hash partitioning, datums array will have two elements - modulus and
* remainder.
* For hash partitioning, datums array will have two elements - modulus
* and remainder.
*/
hash_part = (key->strategy == PARTITION_STRATEGY_HASH);
natts = hash_part ? 2 : partnatts;
@ -1144,10 +1144,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
/*
* Merge partitions from both sides. In each iteration we compare a pair
* of list values, one from each side, and decide whether the corresponding
* partitions match or not. If the two values match exactly, move to the
* next pair of list values, otherwise move to the next list value on the
* side with a smaller list value.
* of list values, one from each side, and decide whether the
* corresponding partitions match or not. If the two values match
* exactly, move to the next pair of list values, otherwise move to the
* next list value on the side with a smaller list value.
*/
outer_pos = inner_pos = 0;
while (outer_pos < outer_bi->ndatums || inner_pos < inner_bi->ndatums)
@ -1163,8 +1163,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
if (outer_pos < outer_bi->ndatums)
{
/*
* If the partition on the outer side has been proven empty, ignore
* it and move to the next datum on the outer side.
* If the partition on the outer side has been proven empty,
* ignore it and move to the next datum on the outer side.
*/
outer_index = outer_bi->indexes[outer_pos];
if (is_dummy_partition(outer_rel, outer_index))
@ -1176,8 +1176,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
if (inner_pos < inner_bi->ndatums)
{
/*
* If the partition on the inner side has been proven empty, ignore
* it and move to the next datum on the inner side.
* If the partition on the inner side has been proven empty,
* ignore it and move to the next datum on the inner side.
*/
inner_index = inner_bi->indexes[inner_pos];
if (is_dummy_partition(inner_rel, inner_index))
@ -1197,10 +1197,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining values on the side which
* finishes later. For that we set the comparison parameter cmpval in
* such a way that it appears as if the side which finishes earlier has
* an extra value higher than any other value on the unfinished side.
* That way we advance the values on the unfinished side till all of
* its values are exhausted.
* such a way that it appears as if the side which finishes earlier
* has an extra value higher than any other value on the unfinished
* side. That way we advance the values on the unfinished side till
* all of its values are exhausted.
*/
if (outer_pos >= outer_bi->ndatums)
cmpval = 1;
@ -1245,10 +1245,10 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
Assert(outer_pos < outer_bi->ndatums);
/*
* If the inner side has the default partition, or this is an outer
* join, try to assign a merged partition to the outer partition
* (see process_outer_partition()). Otherwise, the outer partition
* will not contribute to the result.
* If the inner side has the default partition, or this is an
* outer join, try to assign a merged partition to the outer
* partition (see process_outer_partition()). Otherwise, the
* outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
@ -1281,8 +1281,8 @@ merge_list_bounds(FmgrInfo *partsupfunc, Oid *partcollation,
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
* (see process_inner_partition()). Otherwise, the inner partition
* will not contribute to the result.
* (see process_inner_partition()). Otherwise, the inner
* partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
@ -1459,8 +1459,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
* partitions match or not. If the two ranges overlap, move to the next
* pair of ranges, otherwise move to the next range on the side with a
* lower range. outer_lb_pos/inner_lb_pos keep track of the positions of
* lower bounds in the datums arrays in the outer/inner PartitionBoundInfos
* respectively.
* lower bounds in the datums arrays in the outer/inner
* PartitionBoundInfos respectively.
*/
outer_lb_pos = inner_lb_pos = 0;
outer_index = get_range_partition(outer_rel, outer_bi, &outer_lb_pos,
@ -1480,10 +1480,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
* We run this loop till both sides finish. This allows us to avoid
* duplicating code to handle the remaining ranges on the side which
* finishes later. For that we set the comparison parameter cmpval in
* such a way that it appears as if the side which finishes earlier has
* an extra range higher than any other range on the unfinished side.
* That way we advance the ranges on the unfinished side till all of
* its ranges are exhausted.
* such a way that it appears as if the side which finishes earlier
* has an extra range higher than any other range on the unfinished
* side. That way we advance the ranges on the unfinished side till
* all of its ranges are exhausted.
*/
if (outer_index == -1)
{
@ -1563,10 +1563,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
goto cleanup;
/*
* A row from a non-overlapping portion (if any) of a partition
* on one side might find its join partner in the default
* partition (if any) on the other side, causing the same
* situation as above; give up in that case.
* A row from a non-overlapping portion (if any) of a partition on
* one side might find its join partner in the default partition
* (if any) on the other side, causing the same situation as
* above; give up in that case.
*/
if ((outer_has_default && (lb_cmpval > 0 || ub_cmpval < 0)) ||
(inner_has_default && (lb_cmpval < 0 || ub_cmpval > 0)))
@ -1582,10 +1582,10 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
outer_map.merged[outer_index] == false);
/*
* If the inner side has the default partition, or this is an outer
* join, try to assign a merged partition to the outer partition
* (see process_outer_partition()). Otherwise, the outer partition
* will not contribute to the result.
* If the inner side has the default partition, or this is an
* outer join, try to assign a merged partition to the outer
* partition (see process_outer_partition()). Otherwise, the
* outer partition will not contribute to the result.
*/
if (inner_has_default || IS_OUTER_JOIN(jointype))
{
@ -1621,8 +1621,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
/*
* If the outer side has the default partition, or this is a FULL
* join, try to assign a merged partition to the inner partition
* (see process_inner_partition()). Otherwise, the inner partition
* will not contribute to the result.
* (see process_inner_partition()). Otherwise, the inner
* partition will not contribute to the result.
*/
if (outer_has_default || jointype == JOIN_FULL)
{
@ -1647,8 +1647,8 @@ merge_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
}
/*
* If we assigned a merged partition, add the range bounds and index of
* the merged partition if appropriate.
* If we assigned a merged partition, add the range bounds and index
* of the merged partition if appropriate.
*/
if (merged_index >= 0 && merged_index != default_index)
add_merged_range_bounds(partnatts, partsupfuncs, partcollations,
@ -1898,9 +1898,9 @@ process_outer_partition(PartitionMap *outer_map,
/*
* If the inner side has the default partition, a row from the outer
* partition might find its join partner in the default partition; try
* merging the outer partition with the default partition. Otherwise, this
* should be an outer join, in which case the outer partition has to be
* scanned all the way anyway; merge the outer partition with a dummy
* merging the outer partition with the default partition. Otherwise,
* this should be an outer join, in which case the outer partition has to
* be scanned all the way anyway; merge the outer partition with a dummy
* partition on the other side.
*/
if (inner_has_default)
@ -1909,9 +1909,10 @@ process_outer_partition(PartitionMap *outer_map,
/*
* If the outer side has the default partition as well, the default
* partition on the inner side will have two matching partitions on the
* other side: the outer partition and the default partition on the
* outer side. Partitionwise join doesn't handle this scenario yet.
* partition on the inner side will have two matching partitions on
* the other side: the outer partition and the default partition on
* the outer side. Partitionwise join doesn't handle this scenario
* yet.
*/
if (outer_has_default)
return -1;
@ -1923,10 +1924,10 @@ process_outer_partition(PartitionMap *outer_map,
return -1;
/*
* If this is a FULL join, the default partition on the inner side
* has to be scanned all the way anyway, so the resulting partition
* will contain all key values from the default partition, which any
* other partition of the join relation will not contain. Thus the
* If this is a FULL join, the default partition on the inner side has
* to be scanned all the way anyway, so the resulting partition will
* contain all key values from the default partition, which any other
* partition of the join relation will not contain. Thus the
* resulting partition will act as the default partition of the join
* relation; record the index in *default_index if not already done.
*/
@ -1979,8 +1980,8 @@ process_inner_partition(PartitionMap *outer_map,
/*
* If the outer side has the default partition, a row from the inner
* partition might find its join partner in the default partition; try
* merging the inner partition with the default partition. Otherwise, this
* should be a FULL join, in which case the inner partition has to be
* merging the inner partition with the default partition. Otherwise,
* this should be a FULL join, in which case the inner partition has to be
* scanned all the way anyway; merge the inner partition with a dummy
* partition on the other side.
*/
@ -1990,9 +1991,10 @@ process_inner_partition(PartitionMap *outer_map,
/*
* If the inner side has the default partition as well, the default
* partition on the outer side will have two matching partitions on the
* other side: the inner partition and the default partition on the
* inner side. Partitionwise join doesn't handle this scenario yet.
* partition on the outer side will have two matching partitions on
* the other side: the inner partition and the default partition on
* the inner side. Partitionwise join doesn't handle this scenario
* yet.
*/
if (inner_has_default)
return -1;
@ -2090,10 +2092,10 @@ merge_null_partitions(PartitionMap *outer_map,
/*
* If this is an outer join, the NULL partition on the outer side has
* to be scanned all the way anyway; merge the NULL partition with a
* dummy partition on the other side. In that case consider_outer_null
* means that the NULL partition only contains NULL values as the key
* values, so the merged partition will do so; treat it as the NULL
* partition of the join relation.
* dummy partition on the other side. In that case
* consider_outer_null means that the NULL partition only contains
* NULL values as the key values, so the merged partition will do so;
* treat it as the NULL partition of the join relation.
*/
if (IS_OUTER_JOIN(jointype))
{
@ -2107,12 +2109,12 @@ merge_null_partitions(PartitionMap *outer_map,
Assert(inner_has_null);
/*
* If this is a FULL join, the NULL partition on the inner side has
* to be scanned all the way anyway; merge the NULL partition with a
* dummy partition on the other side. In that case consider_inner_null
* means that the NULL partition only contains NULL values as the key
* values, so the merged partition will do so; treat it as the NULL
* partition of the join relation.
* If this is a FULL join, the NULL partition on the inner side has to
* be scanned all the way anyway; merge the NULL partition with a
* dummy partition on the other side. In that case
* consider_inner_null means that the NULL partition only contains
* NULL values as the key values, so the merged partition will do so;
* treat it as the NULL partition of the join relation.
*/
if (jointype == JOIN_FULL)
*null_index = merge_partition_with_dummy(inner_map, inner_null,
@ -2188,9 +2190,10 @@ merge_default_partitions(PartitionMap *outer_map,
/*
* If this is an outer join, the default partition on the outer side
* has to be scanned all the way anyway; if we have not yet assigned a
* partition, merge the default partition with a dummy partition on the
* other side. The merged partition will act as the default partition
* of the join relation (see comments in process_inner_partition()).
* partition, merge the default partition with a dummy partition on
* the other side. The merged partition will act as the default
* partition of the join relation (see comments in
* process_inner_partition()).
*/
if (IS_OUTER_JOIN(jointype))
{
@ -2211,11 +2214,12 @@ merge_default_partitions(PartitionMap *outer_map,
else if (!outer_has_default && inner_has_default)
{
/*
* If this is a FULL join, the default partition on the inner side
* has to be scanned all the way anyway; if we have not yet assigned a
* partition, merge the default partition with a dummy partition on the
* other side. The merged partition will act as the default partition
* of the join relation (see comments in process_outer_partition()).
* If this is a FULL join, the default partition on the inner side has
* to be scanned all the way anyway; if we have not yet assigned a
* partition, merge the default partition with a dummy partition on
* the other side. The merged partition will act as the default
* partition of the join relation (see comments in
* process_outer_partition()).
*/
if (jointype == JOIN_FULL)
{
@ -2392,10 +2396,10 @@ generate_matching_part_pairs(RelOptInfo *outer_rel, RelOptInfo *inner_rel,
int inner_index = inner_indexes[i];
/*
* If both partitions are dummy, it means the merged partition that had
* been assigned to the outer/inner partition was removed when
* re-merging the outer/inner partition in merge_matching_partitions();
* ignore the merged partition.
* If both partitions are dummy, it means the merged partition that
* had been assigned to the outer/inner partition was removed when
* re-merging the outer/inner partition in
* merge_matching_partitions(); ignore the merged partition.
*/
if (outer_index == -1 && inner_index == -1)
continue;
@ -2484,7 +2488,8 @@ get_range_partition(RelOptInfo *rel,
Assert(bi->strategy == PARTITION_STRATEGY_RANGE);
do {
do
{
part_index = get_range_partition_internal(bi, lb_pos, lb, ub);
if (part_index == -1)
return -1;
@ -2638,8 +2643,8 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
/*
* A LEFT/ANTI join will have all the rows from the outer side, so
* the bounds of the merged partition will be the same as the outer
* bounds.
* the bounds of the merged partition will be the same as the
* outer bounds.
*/
*merged_lb = *outer_lb;
*merged_ub = *outer_ub;
@ -2648,10 +2653,10 @@ get_merged_range_bounds(int partnatts, FmgrInfo *partsupfuncs,
case JOIN_FULL:
/*
* A FULL join will have all the rows from both sides, so the lower
* bound of the merged partition will be the lower of the two lower
* bounds, and the upper bound of the merged partition will be the
* higher of the two upper bounds.
* A FULL join will have all the rows from both sides, so the
* lower bound of the merged partition will be the lower of the
* two lower bounds, and the upper bound of the merged partition
* will be the higher of the two upper bounds.
*/
*merged_lb = (lb_cmpval < 0) ? *outer_lb : *inner_lb;
*merged_ub = (ub_cmpval > 0) ? *outer_ub : *inner_ub;

View File

@ -854,8 +854,8 @@ gen_partprune_steps_internal(GeneratePruningStepsContext *context,
ListCell *lc;
/*
* If this partitioned relation has a default partition and is itself
* a partition (as evidenced by partition_qual being not NIL), we first
* If this partitioned relation has a default partition and is itself a
* partition (as evidenced by partition_qual being not NIL), we first
* check if the clauses contradict the partition constraint. If they do,
* there's no need to generate any steps as it'd already be proven that no
* partitions need to be scanned.

View File

@ -540,23 +540,23 @@ HandleCheckpointerInterrupts(void)
ProcessConfigFile(PGC_SIGHUP);
/*
* Checkpointer is the last process to shut down, so we ask it to
* hold the keys for a range of other tasks required most of which
* have nothing to do with checkpointing at all.
* Checkpointer is the last process to shut down, so we ask it to hold
* the keys for a range of other tasks required most of which have
* nothing to do with checkpointing at all.
*
* For various reasons, some config values can change dynamically
* so the primary copy of them is held in shared memory to make
* sure all backends see the same value. We make Checkpointer
* responsible for updating the shared memory copy if the
* parameter setting changes because of SIGHUP.
* For various reasons, some config values can change dynamically so
* the primary copy of them is held in shared memory to make sure all
* backends see the same value. We make Checkpointer responsible for
* updating the shared memory copy if the parameter setting changes
* because of SIGHUP.
*/
UpdateSharedMemoryConfig();
}
if (ShutdownRequestPending)
{
/*
* From here on, elog(ERROR) should end with exit(1), not send
* control back to the sigsetjmp block above
* From here on, elog(ERROR) should end with exit(1), not send control
* back to the sigsetjmp block above
*/
ExitOnAnyError = true;
/* Close down the database */

View File

@ -6292,10 +6292,10 @@ pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len)
/*
* It is quite possible that a non-aggressive VACUUM ended up skipping
* various pages, however, we'll zero the insert counter here regardless.
* It's currently used only to track when we need to perform an
* "insert" autovacuum, which are mainly intended to freeze newly inserted
* tuples. Zeroing this may just mean we'll not try to vacuum the table
* again until enough tuples have been inserted to trigger another insert
* It's currently used only to track when we need to perform an "insert"
* autovacuum, which are mainly intended to freeze newly inserted tuples.
* Zeroing this may just mean we'll not try to vacuum the table again
* until enough tuples have been inserted to trigger another insert
* autovacuum. An anti-wraparound autovacuum will catch any persistent
* stragglers.
*/

View File

@ -2036,6 +2036,7 @@ retry1:
if (SSLok == 'S' && secure_open_server(port) == -1)
return STATUS_ERROR;
#endif
/*
* regular startup packet, cancel, etc packet should follow, but not
* another SSL negotiation request, and a GSS request should only
@ -2066,6 +2067,7 @@ retry1:
if (GSSok == 'G' && secure_open_gssapi(port) == -1)
return STATUS_ERROR;
#endif
/*
* regular startup packet, cancel, etc packet should follow, but not
* another GSS negotiation request, and an SSL request should only

View File

@ -103,11 +103,11 @@ AddFileToBackupManifest(backup_manifest_info *manifest, const char *spcoid,
}
/*
* Each file's entry needs to be separated from any entry that follows by a
* comma, but there's no comma before the first one or after the last one.
* To make that work, adding a file to the manifest starts by terminating
* the most recently added line, with a comma if appropriate, but does not
* terminate the line inserted for this file.
* Each file's entry needs to be separated from any entry that follows by
* a comma, but there's no comma before the first one or after the last
* one. To make that work, adding a file to the manifest starts by
* terminating the most recently added line, with a comma if appropriate,
* but does not terminate the line inserted for this file.
*/
initStringInfo(&buf);
if (manifest->first_file)

View File

@ -705,7 +705,10 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
List *ancestors = get_partition_ancestors(relid);
ListCell *lc2;
/* Find the "topmost" ancestor that is in this publication. */
/*
* Find the "topmost" ancestor that is in this
* publication.
*/
foreach(lc2, ancestors)
{
Oid ancestor = lfirst_oid(lc2);

View File

@ -425,10 +425,9 @@ pg_physical_replication_slot_advance(XLogRecPtr moveto)
retlsn = moveto;
/*
* Dirty the slot so as it is written out at the next checkpoint.
* Note that the LSN position advanced may still be lost in the
* event of a crash, but this makes the data consistent after a
* clean shutdown.
* Dirty the slot so as it is written out at the next checkpoint. Note
* that the LSN position advanced may still be lost in the event of a
* crash, but this makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}
@ -532,9 +531,9 @@ pg_logical_replication_slot_advance(XLogRecPtr moveto)
* keep track of their progress, so we should make more of an
* effort to save it for them.
*
* Dirty the slot so it is written out at the next checkpoint.
* The LSN position advanced to may still be lost on a crash
* but this makes the data consistent after a clean shutdown.
* Dirty the slot so it is written out at the next checkpoint. The
* LSN position advanced to may still be lost on a crash but this
* makes the data consistent after a clean shutdown.
*/
ReplicationSlotMarkDirty();
}

View File

@ -255,10 +255,10 @@ RequestXLogStreaming(TimeLineID tli, XLogRecPtr recptr, const char *conninfo,
walrcv->conninfo[0] = '\0';
/*
* Use configured replication slot if present, and ignore the value
* of create_temp_slot as the slot name should be persistent. Otherwise,
* use create_temp_slot to determine whether this WAL receiver should
* create a temporary slot by itself and use it, or not.
* Use configured replication slot if present, and ignore the value of
* create_temp_slot as the slot name should be persistent. Otherwise, use
* create_temp_slot to determine whether this WAL receiver should create a
* temporary slot by itself and use it, or not.
*/
if (slotname != NULL && slotname[0] != '\0')
{

View File

@ -352,8 +352,8 @@ WalSndResourceCleanup(bool isCommit)
return;
/*
* Deleting CurrentResourceOwner is not allowed, so we must save a
* pointer in a local variable and clear it first.
* Deleting CurrentResourceOwner is not allowed, so we must save a pointer
* in a local variable and clear it first.
*/
resowner = CurrentResourceOwner;
CurrentResourceOwner = NULL;

View File

@ -827,8 +827,9 @@ dependency_is_compatible_clause(Node *clause, Index relid, AttrNumber *attnum)
/*
* If it's not an "=" operator, just ignore the clause, as it's not
* compatible with functional dependencies. The operator is identified
* simply by looking at which function it uses to estimate selectivity.
* That's a bit strange, but it's what other similar places do.
* simply by looking at which function it uses to estimate
* selectivity. That's a bit strange, but it's what other similar
* places do.
*/
if (get_oprrest(expr->opno) != F_EQSEL)
return false;
@ -929,7 +930,8 @@ static MVDependency *
find_strongest_dependency(MVDependencies **dependencies, int ndependencies,
Bitmapset *attnums)
{
int i, j;
int i,
j;
MVDependency *strongest = NULL;
/* number of attnums in clauses */
@ -967,8 +969,8 @@ find_strongest_dependency(MVDependencies **dependencies, int ndependencies,
/*
* this dependency is stronger, but we must still check that it's
* fully matched to these attnums. We perform this check last as it's
* slightly more expensive than the previous checks.
* fully matched to these attnums. We perform this check last as
* it's slightly more expensive than the previous checks.
*/
if (dependency_is_fully_matched(dependency, attnums))
strongest = dependency; /* save new best match */

View File

@ -160,9 +160,9 @@ BuildRelationExtStatistics(Relation onerel, double totalrows,
stats);
/*
* Don't rebuild statistics objects with statistics target set to 0 (we
* just leave the existing values around, just like we do for regular
* per-column statistics).
* Don't rebuild statistics objects with statistics target set to 0
* (we just leave the existing values around, just like we do for
* regular per-column statistics).
*/
if (stattarget == 0)
continue;
@ -294,16 +294,16 @@ statext_compute_stattarget(int stattarget, int nattrs, VacAttrStats **stats)
int i;
/*
* If there's statistics target set for the statistics object, use it.
* It may be set to 0 which disables building of that statistic.
* If there's statistics target set for the statistics object, use it. It
* may be set to 0 which disables building of that statistic.
*/
if (stattarget >= 0)
return stattarget;
/*
* The target for the statistics object is set to -1, in which case we
* look at the maximum target set for any of the attributes the object
* is defined on.
* look at the maximum target set for any of the attributes the object is
* defined on.
*/
for (i = 0; i < nattrs; i++)
{
@ -1338,7 +1338,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
stat = choose_best_statistics(rel->statlist, STATS_EXT_MCV,
list_attnums, list_length(clauses));
/* if no (additional) matching stats could be found then we've nothing to do */
/*
* if no (additional) matching stats could be found then we've nothing
* to do
*/
if (!stat)
break;
@ -1352,8 +1355,8 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
foreach(l, clauses)
{
/*
* If the clause is compatible with the selected statistics, mark it
* as estimated and add it to the list to estimate.
* If the clause is compatible with the selected statistics, mark
* it as estimated and add it to the list to estimate.
*/
if (list_attnums[listidx] != NULL &&
bms_is_subset(list_attnums[listidx], stat->keys))
@ -1371,15 +1374,15 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
/*
* First compute "simple" selectivity, i.e. without the extended
* statistics, and essentially assuming independence of the
* columns/clauses. We'll then use the various selectivities computed from
* MCV list to improve it.
* columns/clauses. We'll then use the various selectivities computed
* from MCV list to improve it.
*/
simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
jointype, sjinfo, NULL);
/*
* Now compute the multi-column estimate from the MCV list, along with the
* other selectivities (base & total selectivity).
* Now compute the multi-column estimate from the MCV list, along with
* the other selectivities (base & total selectivity).
*/
mcv_sel = mcv_clauselist_selectivity(root, stat, stat_clauses, varRelid,
jointype, sjinfo, rel,
@ -1393,7 +1396,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
if (other_sel > 1.0 - mcv_totalsel)
other_sel = 1.0 - mcv_totalsel;
/* Overall selectivity is the combination of MCV and non-MCV estimates. */
/*
* Overall selectivity is the combination of MCV and non-MCV
* estimates.
*/
stat_sel = mcv_sel + other_sel;
CLAMP_PROBABILITY(stat_sel);

View File

@ -210,8 +210,8 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
groups = build_distinct_groups(nitems, items, mss, &ngroups);
/*
* Maximum number of MCV items to store, based on the statistics target
* we computed for the statistics object (from target set for the object
* Maximum number of MCV items to store, based on the statistics target we
* computed for the statistics object (from target set for the object
* itself, attributes and the system default). In any case, we can't keep
* more groups than we have available.
*/
@ -528,8 +528,8 @@ build_column_frequencies(SortItem *groups, int ngroups,
/*
* Identify distinct values, compute frequency (there might be
* multiple MCV items containing this value, so we need to sum
* counts from all of them.
* multiple MCV items containing this value, so we need to sum counts
* from all of them.
*/
ncounts[dim] = 1;
for (i = 1; i < ngroups; i++)
@ -738,8 +738,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
/*
* We don't care about alignment in the serialized data, so we
* pack the data as much as possible. But we also track how much
* data will be needed after deserialization, and in that case
* we need to account for alignment of each item.
* data will be needed after deserialization, and in that case we
* need to account for alignment of each item.
*
* Note: As the items are fixed-length, we could easily compute
* this during deserialization, but we do it here anyway.
@ -954,7 +954,8 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
info[dim].nvalues, sizeof(Datum),
compare_scalars_simple, &ssup[dim]);
Assert(value != NULL); /* serialization or deduplication error */
Assert(value != NULL); /* serialization or deduplication
* error */
/* compute index within the deduplicated array */
index = (uint16) (value - values[dim]);
@ -1147,8 +1148,8 @@ statext_mcv_deserialize(bytea *data)
* serialized data - it's not aligned properly, and it may disappear while
* we're still using the MCV list, e.g. due to catcache release.
*
* We do care about alignment here, because we will allocate all the pieces
* at once, but then use pointers to different parts.
* We do care about alignment here, because we will allocate all the
* pieces at once, but then use pointers to different parts.
*/
mcvlen = MAXALIGN(offsetof(MCVList, items) + (sizeof(MCVItem) * nitems));
@ -1377,7 +1378,8 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
/* stuff done on every call of the function */
funcctx = SRF_PERCALL_SETUP();
if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more left to send */
if (funcctx->call_cntr < funcctx->max_calls) /* do when there is more
* left to send */
{
Datum values[5];
bool nulls[5];
@ -1606,9 +1608,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
MCVItem *item = &mcvlist->items[i];
/*
* When the MCV item or the Const value is NULL we can treat
* this as a mismatch. We must not call the operator because
* of strictness.
* When the MCV item or the Const value is NULL we can
* treat this as a mismatch. We must not call the operator
* because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{
@ -1631,10 +1633,10 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
*
* We don't store collations used to build the statistics,
* but we can use the collation for the attribute itself,
* as stored in varcollid. We do reset the statistics after
* a type change (including collation change), so this is
* OK. We may need to relax this after allowing extended
* statistics on expressions.
* as stored in varcollid. We do reset the statistics
* after a type change (including collation change), so
* this is OK. We may need to relax this after allowing
* extended statistics on expressions.
*/
if (varonleft)
match = DatumGetBool(FunctionCall2Coll(&opproc,
@ -1707,9 +1709,9 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
MCVItem *item = &mcvlist->items[i];
/*
* When the MCV item or the Const value is NULL we can treat
* this as a mismatch. We must not call the operator because
* of strictness.
* When the MCV item or the Const value is NULL we can
* treat this as a mismatch. We must not call the operator
* because of strictness.
*/
if (item->isnull[idx] || cst->constisnull)
{

View File

@ -287,7 +287,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks)
{
buf = fsm_readbuf(rel, first_removed_address, false);
if (!BufferIsValid(buf))
return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
return InvalidBlockNumber; /* nothing to do; the FSM was already
* smaller */
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
/* NO EREPORT(ERROR) from here till changes are logged */
@ -317,7 +318,8 @@ FreeSpaceMapPrepareTruncateRel(Relation rel, BlockNumber nblocks)
{
new_nfsmblocks = fsm_logical_to_physical(first_removed_address);
if (smgrnblocks(rel->rd_smgr, FSM_FORKNUM) <= new_nfsmblocks)
return InvalidBlockNumber; /* nothing to do; the FSM was already smaller */
return InvalidBlockNumber; /* nothing to do; the FSM was already
* smaller */
}
return new_nfsmblocks;

View File

@ -1099,9 +1099,9 @@ WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
!PostmasterIsAlive())
{
/*
* The extra PostmasterIsAliveInternal() check prevents false alarms on
* systems that give a different value for getppid() while being traced
* by a debugger.
* The extra PostmasterIsAliveInternal() check prevents false alarms
* on systems that give a different value for getppid() while being
* traced by a debugger.
*/
set->report_postmaster_not_running = true;
}

View File

@ -168,13 +168,13 @@ ProcSignalInit(int pss_idx)
/*
* Initialize barrier state. Since we're a brand-new process, there
* shouldn't be any leftover backend-private state that needs to be
* updated. Therefore, we can broadcast the latest barrier generation
* and disregard any previously-set check bits.
* updated. Therefore, we can broadcast the latest barrier generation and
* disregard any previously-set check bits.
*
* NB: This only works if this initialization happens early enough in the
* startup sequence that we haven't yet cached any state that might need
* to be invalidated. That's also why we have a memory barrier here, to
* be sure that any later reads of memory happen strictly after this.
* to be invalidated. That's also why we have a memory barrier here, to be
* sure that any later reads of memory happen strictly after this.
*/
pg_atomic_write_u32(&slot->pss_barrierCheckMask, 0);
barrier_generation =
@ -326,10 +326,10 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
/*
* Set all the flags.
*
* Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this
* is totally ordered with respect to anything the caller did before, and
* anything that we do afterwards. (This is also true of the later call
* to pg_atomic_add_fetch_u64.)
* Note that pg_atomic_fetch_or_u32 has full barrier semantics, so this is
* totally ordered with respect to anything the caller did before, and
* anything that we do afterwards. (This is also true of the later call to
* pg_atomic_add_fetch_u64.)
*/
for (int i = 0; i < NumProcSignalSlots; i++)
{
@ -349,13 +349,13 @@ EmitProcSignalBarrier(ProcSignalBarrierType type)
* generation.
*
* Concurrency is not a problem here. Backends that have exited don't
* matter, and new backends that have joined since we entered this function
* must already have current state, since the caller is responsible for
* making sure that the relevant state is entirely visible before calling
* this function in the first place. We still have to wake them up -
* because we can't distinguish between such backends and older backends
* that need to update state - but they won't actually need to change
* any state.
* matter, and new backends that have joined since we entered this
* function must already have current state, since the caller is
* responsible for making sure that the relevant state is entirely visible
* before calling this function in the first place. We still have to wake
* them up - because we can't distinguish between such backends and older
* backends that need to update state - but they won't actually need to
* change any state.
*/
for (int i = NumProcSignalSlots - 1; i >= 0; i--)
{
@ -408,11 +408,11 @@ WaitForProcSignalBarrier(uint64 generation)
}
/*
* The caller is probably calling this function because it wants to
* read the shared state or perform further writes to shared state once
* all backends are known to have absorbed the barrier. However, the
* read of pss_barrierGeneration was performed unlocked; insert a memory
* barrier to separate it from whatever follows.
* The caller is probably calling this function because it wants to read
* the shared state or perform further writes to shared state once all
* backends are known to have absorbed the barrier. However, the read of
* pss_barrierGeneration was performed unlocked; insert a memory barrier
* to separate it from whatever follows.
*/
pg_memory_barrier();
}
@ -437,8 +437,8 @@ ProcessProcSignalBarrier(void)
ProcSignalBarrierPending = false;
/*
* Read the current barrier generation, and then get the flags that
* are set for this backend. Note that pg_atomic_exchange_u32 is a full
* Read the current barrier generation, and then get the flags that are
* set for this backend. Note that pg_atomic_exchange_u32 is a full
* barrier, so we're guaranteed that the read of the barrier generation
* happens before we atomically extract the flags, and that any subsequent
* state changes happen afterward.
@ -477,8 +477,8 @@ ProcessBarrierPlaceholder(void)
* machinery gets committed. Rename PROCSIGNAL_BARRIER_PLACEHOLDER to
* PROCSIGNAL_BARRIER_SOMETHING_ELSE where SOMETHING_ELSE is something
* appropriately descriptive. Get rid of this function and instead have
* ProcessBarrierSomethingElse. Most likely, that function should live
* in the file pertaining to that subsystem, rather than here.
* ProcessBarrierSomethingElse. Most likely, that function should live in
* the file pertaining to that subsystem, rather than here.
*/
}

View File

@ -580,11 +580,11 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
/*
* We might as well update the local smgr_fsm_nblocks and
* smgr_vm_nblocks settings. The smgr cache inval message that
* this function sent will cause other backends to invalidate
* their copies of smgr_fsm_nblocks and smgr_vm_nblocks,
* and these ones too at the next command boundary.
* But these ensure they aren't outright wrong until then.
* smgr_vm_nblocks settings. The smgr cache inval message that this
* function sent will cause other backends to invalidate their copies
* of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the
* next command boundary. But these ensure they aren't outright wrong
* until then.
*/
if (forknum[i] == FSM_FORKNUM)
reln->smgr_fsm_nblocks = nblocks[i];

View File

@ -224,8 +224,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
/*
* Surprisingly, ALTER SYSTEM meets all our definitions of
* read-only: it changes nothing that affects the output of
* pg_dump, it doesn't write WAL or imperil the application
* of future WAL, and it doesn't depend on any state that needs
* pg_dump, it doesn't write WAL or imperil the application of
* future WAL, and it doesn't depend on any state that needs
* to be synchronized with parallel workers.
*
* So, despite the fact that it writes to a file, it's read
@ -271,10 +271,10 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_VariableSetStmt:
{
/*
* These modify only backend-local state, so they're OK to
* run in a read-only transaction or on a standby. However,
* they are disallowed in parallel mode, because they either
* rely upon or modify backend-local state that might not be
* These modify only backend-local state, so they're OK to run
* in a read-only transaction or on a standby. However, they
* are disallowed in parallel mode, because they either rely
* upon or modify backend-local state that might not be
* synchronized among cooperating backends.
*/
return COMMAND_OK_IN_RECOVERY | COMMAND_OK_IN_READ_ONLY_TXN;
@ -285,8 +285,9 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_VacuumStmt:
{
/*
* These commands write WAL, so they're not strictly read-only,
* and running them in parallel workers isn't supported.
* These commands write WAL, so they're not strictly
* read-only, and running them in parallel workers isn't
* supported.
*
* However, they don't change the database state in a way that
* would affect pg_dump output, so it's fine to run them in a
@ -302,8 +303,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
CopyStmt *stmt = (CopyStmt *) parsetree;
/*
* You might think that COPY FROM is not at all read only,
* but it's OK to copy into a temporary table, because that
* You might think that COPY FROM is not at all read only, but
* it's OK to copy into a temporary table, because that
* wouldn't change the output of pg_dump. If the target table
* turns out to be non-temporary, DoCopy itself will call
* PreventCommandIfReadOnly.
@ -318,8 +319,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
case T_VariableShowStmt:
{
/*
* These commands don't modify any data and are safe to run
* in a parallel worker.
* These commands don't modify any data and are safe to run in
* a parallel worker.
*/
return COMMAND_IS_STRICTLY_READ_ONLY;
}
@ -329,8 +330,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
{
/*
* NOTIFY requires an XID assignment, so it can't be permitted
* on a standby. Perhaps LISTEN could, since without NOTIFY
* it would be OK to just do nothing, at least until promotion,
* on a standby. Perhaps LISTEN could, since without NOTIFY it
* would be OK to just do nothing, at least until promotion,
* but we currently prohibit it lest the user get the wrong
* idea.
*
@ -346,7 +347,8 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
/*
* Only weaker locker modes are allowed during recovery. The
* restrictions here must match those in LockAcquireExtended().
* restrictions here must match those in
* LockAcquireExtended().
*/
if (stmt->mode > RowExclusiveLock)
return COMMAND_OK_IN_READ_ONLY_TXN;
@ -359,10 +361,10 @@ ClassifyUtilityCommandAsReadOnly(Node *parsetree)
TransactionStmt *stmt = (TransactionStmt *) parsetree;
/*
* PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all
* write WAL, so they're not read-only in the strict sense;
* but the first and third do not change pg_dump output, so
* they're OK in a read-only transactions.
* PREPARE, COMMIT PREPARED, and ROLLBACK PREPARED all write
* WAL, so they're not read-only in the strict sense; but the
* first and third do not change pg_dump output, so they're OK
* in a read-only transactions.
*
* We also consider COMMIT PREPARED to be OK in a read-only
* transaction environment, by way of exception.

View File

@ -1214,7 +1214,8 @@ static int32
int4gcd_internal(int32 arg1, int32 arg2)
{
int32 swap;
int32 a1, a2;
int32 a1,
a2;
/*
* Put the greater absolute value in arg1.

View File

@ -685,7 +685,8 @@ static int64
int8gcd_internal(int64 arg1, int64 arg2)
{
int64 swap;
int64 a1, a2;
int64 a1,
a2;
/*
* Put the greater absolute value in arg1.

View File

@ -4569,6 +4569,7 @@ jsonb_set_lax(PG_FUNCTION_ARGS)
else if (strcmp(handle_val, "return_target") == 0)
{
Jsonb *in = PG_GETARG_JSONB_P(0);
PG_RETURN_JSONB_P(in);
}
else

View File

@ -45,11 +45,13 @@ static inline int
decimalLength32(const uint32 v)
{
int t;
static uint32 PowersOfTen[] =
{1, 10, 100,
static const uint32 PowersOfTen[] = {
1, 10, 100,
1000, 10000, 100000,
1000000, 10000000, 100000000,
1000000000};
1000000000
};
/*
* Compute base-10 logarithm by dividing the base-2 logarithm by a
* good-enough approximation of the base-2 logarithm of 10
@ -62,7 +64,7 @@ static inline int
decimalLength64(const uint64 v)
{
int t;
static uint64 PowersOfTen[] = {
static const uint64 PowersOfTen[] = {
UINT64CONST(1), UINT64CONST(10),
UINT64CONST(100), UINT64CONST(1000),
UINT64CONST(10000), UINT64CONST(100000),

View File

@ -1152,7 +1152,8 @@ regcollationout(PG_FUNCTION_ARGS)
char *nspname;
/*
* Would this collation be found by regcollationin? If not, qualify it.
* Would this collation be found by regcollationin? If not,
* qualify it.
*/
if (CollationIsVisible(collationid))
nspname = NULL;

View File

@ -11313,8 +11313,8 @@ get_reloptions(StringInfo buf, Datum reloptions)
char *value;
/*
* Each array element should have the form name=value. If the "="
* is missing for some reason, treat it like an empty value.
* Each array element should have the form name=value. If the "=" is
* missing for some reason, treat it like an empty value.
*/
name = option;
separator = strchr(option, '=');
@ -11332,11 +11332,11 @@ get_reloptions(StringInfo buf, Datum reloptions)
/*
* In general we need to quote the value; but to avoid unnecessary
* clutter, do not quote if it is an identifier that would not
* need quoting. (We could also allow numbers, but that is a bit
* trickier than it looks --- for example, are leading zeroes
* significant? We don't want to assume very much here about what
* custom reloptions might mean.)
* clutter, do not quote if it is an identifier that would not need
* quoting. (We could also allow numbers, but that is a bit trickier
* than it looks --- for example, are leading zeroes significant? We
* don't want to assume very much here about what custom reloptions
* might mean.)
*/
if (quote_identifier(value) == value)
appendStringInfoString(buf, value);

View File

@ -5430,7 +5430,8 @@ RelationGetIndexAttOptions(Relation relation, bool copy)
MemoryContext oldcxt;
bytea **opts = relation->rd_opcoptions;
Oid relid = RelationGetRelid(relation);
int natts = RelationGetNumberOfAttributes(relation); /* XXX IndexRelationGetNumberOfKeyAttributes */
int natts = RelationGetNumberOfAttributes(relation); /* XXX
* IndexRelationGetNumberOfKeyAttributes */
int i;
/* Try to copy cached options. */

View File

@ -1719,7 +1719,10 @@ hash_corrupted(HTAB *hashp)
int
my_log2(long num)
{
/* guard against too-large input, which would be invalid for pg_ceil_log2_*() */
/*
* guard against too-large input, which would be invalid for
* pg_ceil_log2_*()
*/
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;

View File

@ -217,10 +217,11 @@ SlabContextCreate(MemoryContext parent,
headerSize = offsetof(SlabContext, freelist) + freelistSize;
#ifdef MEMORY_CONTEXT_CHECKING
/*
* With memory checking, we need to allocate extra space for the bitmap
* of free chunks. The bitmap is an array of bools, so we don't need to
* worry about alignment.
* With memory checking, we need to allocate extra space for the bitmap of
* free chunks. The bitmap is an array of bools, so we don't need to worry
* about alignment.
*/
headerSize += chunksPerBlock * sizeof(bool);
#endif

View File

@ -440,6 +440,7 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
while (pos != 0)
{
unsigned long parent = parent_offset(pos);
if (heap[parent] < heap[pos])
break;
@ -1017,8 +1018,8 @@ LogicalTapeSetExtend(LogicalTapeSet *lts, int nAdditional)
lts->nTapes += nAdditional;
lts->tapes = (LogicalTape *) repalloc(
lts->tapes, lts->nTapes * sizeof(LogicalTape));
lts->tapes = (LogicalTape *) repalloc(lts->tapes,
lts->nTapes * sizeof(LogicalTape));
for (i = nTapesOrig; i < lts->nTapes; i++)
ltsInitTape(&lts->tapes[i]);

View File

@ -1429,11 +1429,11 @@ tuplesort_updatemax(Tuplesortstate *state)
/*
* Sort evicts data to the disk when it wasn't able to fit that data into
* main memory. This is why we assume space used on the disk to be
* more important for tracking resource usage than space used in memory.
* Note that the amount of space occupied by some tupleset on the disk might
* be less than amount of space occupied by the same tupleset in
* memory due to more compact representation.
* main memory. This is why we assume space used on the disk to be more
* important for tracking resource usage than space used in memory. Note
* that the amount of space occupied by some tupleset on the disk might be
* less than amount of space occupied by the same tupleset in memory due
* to more compact representation.
*/
if ((isSpaceDisk && !state->isMaxSpaceDisk) ||
(isSpaceDisk == state->isMaxSpaceDisk && spaceUsed > state->maxSpace))

View File

@ -1051,6 +1051,7 @@ ReceiveTarFile(PGconn *conn, PGresult *res, int rownum)
if (compresslevel != 0)
{
int fd = dup(fileno(stdout));
if (fd < 0)
{
pg_log_error("could not duplicate stdout: %m");

View File

@ -161,7 +161,8 @@ rmtree("$tempdir/backup");
$node->command_ok(
[
'pg_basebackup', '-D', "$tempdir/backup2", '--no-manifest',
'pg_basebackup', '-D',
"$tempdir/backup2", '--no-manifest',
'--waldir', "$tempdir/xlog2"
],
'separate xlog directory');

View File

@ -4074,8 +4074,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables)
TableInfo *tbinfo = &tblinfo[i];
/*
* Only regular and partitioned tables can be added to
* publications.
* Only regular and partitioned tables can be added to publications.
*/
if (tbinfo->relkind != RELKIND_RELATION &&
tbinfo->relkind != RELKIND_PARTITIONED_TABLE)
@ -7294,7 +7293,10 @@ getIndexes(Archive *fout, TableInfo tblinfo[], int numTables)
indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't');
indxinfo[j].indisreplident = (PQgetvalue(res, j, i_indisreplident)[0] == 't');
indxinfo[j].parentidx = atooid(PQgetvalue(res, j, i_parentidx));
indxinfo[j].partattaches = (SimplePtrList) { NULL, NULL };
indxinfo[j].partattaches = (SimplePtrList)
{
NULL, NULL
};
contype = *(PQgetvalue(res, j, i_contype));
if (contype == 'p' || contype == 'u' || contype == 'x')

View File

@ -1411,7 +1411,8 @@ my %tests = (
"CREATE DATABASE dump_test2 LOCALE = 'C'" => {
create_order => 47,
create_sql => "CREATE DATABASE dump_test2 LOCALE = 'C' TEMPLATE = template0;",
create_sql =>
"CREATE DATABASE dump_test2 LOCALE = 'C' TEMPLATE = template0;",
regexp => qr/^
\QCREATE DATABASE dump_test2 \E.*\QLOCALE = 'C';\E
/xm,
@ -2574,7 +2575,8 @@ my %tests = (
'ALTER STATISTICS extended_stats_options' => {
create_order => 98,
create_sql => 'ALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000',
create_sql =>
'ALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000',
regexp => qr/^
\QALTER STATISTICS dump_test.test_ext_stats_opts SET STATISTICS 1000;\E
/xms,

View File

@ -644,11 +644,10 @@ verify_backup_file(verifier_context *context, char *relpath, char *fullpath)
}
/*
* We don't verify checksums at this stage. We first finish verifying
* that we have the expected set of files with the expected sizes, and
* only afterwards verify the checksums. That's because computing
* checksums may take a while, and we'd like to report more obvious
* problems quickly.
* We don't verify checksums at this stage. We first finish verifying that
* we have the expected set of files with the expected sizes, and only
* afterwards verify the checksums. That's because computing checksums may
* take a while, and we'd like to report more obvious problems quickly.
*/
}

View File

@ -9,13 +9,16 @@ program_help_ok('pg_verifybackup');
program_version_ok('pg_verifybackup');
program_options_handling_ok('pg_verifybackup');
command_fails_like(['pg_verifybackup'],
command_fails_like(
['pg_verifybackup'],
qr/no backup directory specified/,
'target directory must be specified');
command_fails_like(['pg_verifybackup', $tempdir],
command_fails_like(
[ 'pg_verifybackup', $tempdir ],
qr/could not open file.*\/backup_manifest\"/,
'pg_verifybackup requires a manifest');
command_fails_like(['pg_verifybackup', $tempdir, $tempdir],
command_fails_like(
[ 'pg_verifybackup', $tempdir, $tempdir ],
qr/too many command-line arguments/,
'multiple target directories not allowed');
@ -24,7 +27,7 @@ open(my $fh, '>', "$tempdir/backup_manifest") || die "open: $!";
close($fh);
# but then try to use an alternate, nonexisting manifest
command_fails_like(['pg_verifybackup', '-m', "$tempdir/not_the_manifest",
$tempdir],
command_fails_like(
[ 'pg_verifybackup', '-m', "$tempdir/not_the_manifest", $tempdir ],
qr/could not open file.*\/not_the_manifest\"/,
'pg_verifybackup respects -m flag');

View File

@ -16,9 +16,9 @@ $master->start;
for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
{
my $backup_path = $master->backup_dir . '/' . $algorithm;
my @backup = ('pg_basebackup', '-D', $backup_path,
'--manifest-checksums', $algorithm,
'--no-sync');
my @backup = (
'pg_basebackup', '-D', $backup_path,
'--manifest-checksums', $algorithm, '--no-sync');
my @verify = ('pg_verifybackup', '-e', $backup_path);
# A backup with a bogus algorithm should fail.
@ -45,8 +45,8 @@ for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
my $manifest = slurp_file("$backup_path/backup_manifest");
my $count_of_algorithm_in_manifest =
(() = $manifest =~ /$algorithm/mig);
cmp_ok($count_of_algorithm_in_manifest, '>', 100,
"$algorithm is mentioned many times in the manifest");
cmp_ok($count_of_algorithm_in_manifest,
'>', 100, "$algorithm is mentioned many times in the manifest");
}
# Make sure that it verifies OK.

View File

@ -55,14 +55,12 @@ my @scenario = (
{
'name' => 'append_to_file',
'mutilate' => \&mutilate_append_to_file,
'fails_like' =>
qr/has size \d+ on disk but size \d+ in the manifest/
'fails_like' => qr/has size \d+ on disk but size \d+ in the manifest/
},
{
'name' => 'truncate_file',
'mutilate' => \&mutilate_truncate_file,
'fails_like' =>
qr/has size 0 on disk but size \d+ in the manifest/
'fails_like' => qr/has size 0 on disk but size \d+ in the manifest/
},
{
'name' => 'replace_file',
@ -93,8 +91,7 @@ my @scenario = (
'cleanup' => \&cleanup_search_directory_fails,
'fails_like' => qr/could not stat file or directory/,
'skip_on_windows' => 1
}
);
});
for my $scenario (@scenario)
{
@ -112,8 +109,11 @@ for my $scenario (@scenario)
# it. Tell it not to.
# See https://www.msys2.org/wiki/Porting/#filesystem-namespaces
local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix;
$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync',
'-T', "${source_ts_path}=${backup_ts_path}"],
$master->command_ok(
[
'pg_basebackup', '-D', $backup_path, '--no-sync',
'-T', "${source_ts_path}=${backup_ts_path}"
],
"base backup ok");
command_ok([ 'pg_verifybackup', $backup_path ],
"intact backup verified");
@ -122,7 +122,8 @@ for my $scenario (@scenario)
$scenario->{'mutilate'}->($backup_path);
# Now check that the backup no longer verifies.
command_fails_like(['pg_verifybackup', $backup_path ],
command_fails_like(
[ 'pg_verifybackup', $backup_path ],
$scenario->{'fails_like'},
"corrupt backup fails verification: $name");
@ -157,8 +158,8 @@ sub mutilate_extra_file
sub mutilate_extra_tablespace_file
{
my ($backup_path) = @_;
my ($tsoid) = grep { $_ ne '.' && $_ ne '..' }
slurp_dir("$backup_path/pg_tblspc");
my ($tsoid) =
grep { $_ ne '.' && $_ ne '..' } slurp_dir("$backup_path/pg_tblspc");
my ($catvdir) = grep { $_ ne '.' && $_ ne '..' }
slurp_dir("$backup_path/pg_tblspc/$tsoid");
my ($tsdboid) = grep { $_ ne '.' && $_ ne '..' }
@ -181,8 +182,8 @@ sub mutilate_missing_file
sub mutilate_missing_tablespace
{
my ($backup_path) = @_;
my ($tsoid) = grep { $_ ne '.' && $_ ne '..' }
slurp_dir("$backup_path/pg_tblspc");
my ($tsoid) =
grep { $_ ne '.' && $_ ne '..' } slurp_dir("$backup_path/pg_tblspc");
my $pathname = "$backup_path/pg_tblspc/$tsoid";
if ($windows_os)
{

View File

@ -34,18 +34,21 @@ print $fh 'q' x length($version_contents);
close($fh);
# Verify that pg_verifybackup -q now fails.
command_fails_like(['pg_verifybackup', '-q', $backup_path ],
command_fails_like(
[ 'pg_verifybackup', '-q', $backup_path ],
qr/checksum mismatch for file \"PG_VERSION\"/,
'-q checksum mismatch');
# Since we didn't change the length of the file, verification should succeed
# if we ignore checksums. Check that we get the right message, too.
command_like(['pg_verifybackup', '-s', $backup_path ],
command_like(
[ 'pg_verifybackup', '-s', $backup_path ],
qr/backup successfully verified/,
'-s skips checksumming');
# Validation should succeed if we ignore the problem file.
command_like(['pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
command_like(
[ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
qr/backup successfully verified/,
'-i ignores problem file');
@ -54,13 +57,14 @@ rmtree($backup_path . "/pg_xact");
# We're ignoring the problem with PG_VERSION, but not the problem with
# pg_xact, so verification should fail here.
command_fails_like(['pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
command_fails_like(
[ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ],
qr/pg_xact.*is present in the manifest but not on disk/,
'-i does not ignore all problems');
# If we use -i twice, we should be able to ignore all of the problems.
command_like(['pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact',
$backup_path ],
command_like(
[ 'pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact', $backup_path ],
qr/backup successfully verified/,
'multiple -i options work');
@ -68,22 +72,33 @@ command_like(['pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact',
$result = IPC::Run::run [ 'pg_verifybackup', $backup_path ],
'>', \$stdout, '2>', \$stderr;
ok(!$result, "multiple problems: fails");
like($stderr, qr/pg_xact.*is present in the manifest but not on disk/,
like(
$stderr,
qr/pg_xact.*is present in the manifest but not on disk/,
"multiple problems: missing files reported");
like($stderr, qr/checksum mismatch for file \"PG_VERSION\"/,
like(
$stderr,
qr/checksum mismatch for file \"PG_VERSION\"/,
"multiple problems: checksum mismatch reported");
# Verify that when -e is used, only the problem detected first is reported.
$result = IPC::Run::run [ 'pg_verifybackup', '-e', $backup_path ],
'>', \$stdout, '2>', \$stderr;
ok(!$result, "-e reports 1 error: fails");
like($stderr, qr/pg_xact.*is present in the manifest but not on disk/,
like(
$stderr,
qr/pg_xact.*is present in the manifest but not on disk/,
"-e reports 1 error: missing files reported");
unlike($stderr, qr/checksum mismatch for file \"PG_VERSION\"/,
unlike(
$stderr,
qr/checksum mismatch for file \"PG_VERSION\"/,
"-e reports 1 error: checksum mismatch not reported");
# Test valid manifest with nonexistent backup directory.
command_fails_like(['pg_verifybackup', '-m', "$backup_path/backup_manifest",
"$backup_path/fake" ],
command_fails_like(
[
'pg_verifybackup', '-m',
"$backup_path/backup_manifest", "$backup_path/fake"
],
qr/could not open directory/,
'nonexistent backup directory');

View File

@ -11,7 +11,8 @@ use Test::More tests => 58;
my $tempdir = TestLib::tempdir;
test_bad_manifest('input string ended unexpectedly',
test_bad_manifest(
'input string ended unexpectedly',
qr/could not parse backup manifest: The input string ended unexpectedly/,
<<EOM);
{
@ -184,9 +185,7 @@ sub test_fatal_error
{
my ($test_name, $manifest_contents) = @_;
test_bad_manifest($test_name,
qr/fatal: $test_name/,
$manifest_contents);
test_bad_manifest($test_name, qr/fatal: $test_name/, $manifest_contents);
return;
}
@ -198,7 +197,6 @@ sub test_bad_manifest
print $fh $manifest_contents;
close($fh);
command_fails_like(['pg_verifybackup', $tempdir], $regexp,
$test_name);
command_fails_like([ 'pg_verifybackup', $tempdir ], $regexp, $test_name);
return;
}

View File

@ -12,16 +12,20 @@ my $master = get_new_node('master');
$master->init(allows_streaming => 1);
$master->start;
my $backup_path = $master->backup_dir . '/test_encoding';
$master->command_ok(['pg_basebackup', '-D', $backup_path, '--no-sync',
'--manifest-force-encode' ],
$master->command_ok(
[
'pg_basebackup', '-D',
$backup_path, '--no-sync',
'--manifest-force-encode'
],
"backup ok with forced hex encoding");
my $manifest = slurp_file("$backup_path/backup_manifest");
my $count_of_encoded_path_in_manifest =
(() = $manifest =~ /Encoded-Path/mig);
cmp_ok($count_of_encoded_path_in_manifest, '>', 100,
"many paths are encoded in the manifest");
my $count_of_encoded_path_in_manifest = (() = $manifest =~ /Encoded-Path/mig);
cmp_ok($count_of_encoded_path_in_manifest,
'>', 100, "many paths are encoded in the manifest");
command_like(['pg_verifybackup', '-s', $backup_path ],
command_like(
[ 'pg_verifybackup', '-s', $backup_path ],
qr/backup successfully verified/,
'backup with forced encoding verified');

View File

@ -23,12 +23,14 @@ my $relocated_pg_wal = $master->backup_dir . '/relocated_pg_wal';
rename($original_pg_wal, $relocated_pg_wal) || die "rename pg_wal: $!";
# WAL verification should fail.
command_fails_like(['pg_verifybackup', $backup_path ],
command_fails_like(
[ 'pg_verifybackup', $backup_path ],
qr/WAL parsing failed for timeline 1/,
'missing pg_wal causes failure');
# Should work if we skip WAL verification.
command_ok(['pg_verifybackup', '-n', $backup_path ],
command_ok(
[ 'pg_verifybackup', '-n', $backup_path ],
'missing pg_wal OK if not verifying WAL');
# Should also work if we specify the correct WAL location.
@ -50,6 +52,7 @@ print $fh 'w' x $wal_size;
close($fh);
# WAL verification should fail.
command_fails_like(['pg_verifybackup', $backup_path ],
command_fails_like(
[ 'pg_verifybackup', $backup_path ],
qr/WAL parsing failed for timeline 1/,
'corrupt WAL file causes failure');

View File

@ -68,8 +68,7 @@ my $ets = TestLib::perl2host($ts);
# the next commands will issue a syntax error if the path contains a "'"
$node->safe_psql('postgres',
"CREATE TABLESPACE regress_pgbench_tap_1_ts LOCATION '$ets';"
);
"CREATE TABLESPACE regress_pgbench_tap_1_ts LOCATION '$ets';");
# Test concurrent OID generation via pg_enum_oid_index. This indirectly
# exercises LWLock and spinlock concurrency.
@ -106,8 +105,10 @@ pgbench(
'-i', 0,
[qr{^$}],
[
qr{creating tables}, qr{vacuuming},
qr{creating primary keys}, qr{done in \d+\.\d\d s }
qr{creating tables},
qr{vacuuming},
qr{creating primary keys},
qr{done in \d+\.\d\d s }
],
'pgbench scale 1 initialization',);
@ -276,9 +277,9 @@ COMMIT;
# 1. Logging neither with errors nor with statements
$node->append_conf('postgresql.conf',
"log_min_duration_statement = 0\n" .
"log_parameter_max_length = 0\n" .
"log_parameter_max_length_on_error = 0");
"log_min_duration_statement = 0\n"
. "log_parameter_max_length = 0\n"
. "log_parameter_max_length_on_error = 0");
$node->reload;
pgbench(
'-n -t1 -c1 -M prepared',
@ -296,13 +297,16 @@ select column1::jsonb from (values (:value), (:long)) as q;
]
});
my $log = TestLib::slurp_file($node->logfile);
unlike($log, qr[DETAIL: parameters: \$1 = '\{ invalid ',], "no parameters logged");
unlike(
$log,
qr[DETAIL: parameters: \$1 = '\{ invalid ',],
"no parameters logged");
$log = undef;
# 2. Logging truncated parameters on error, full with statements
$node->append_conf('postgresql.conf',
"log_parameter_max_length = -1\n" .
"log_parameter_max_length_on_error = 64");
"log_parameter_max_length = -1\n"
. "log_parameter_max_length_on_error = 64");
$node->reload;
pgbench(
'-n -t1 -c1 -M prepared',
@ -334,15 +338,17 @@ select column1::jsonb from (values (:value), (:long)) as q;
]
});
$log = TestLib::slurp_file($node->logfile);
like($log, qr[DETAIL: parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?'''],
like(
$log,
qr[DETAIL: parameters: \$1 = '\{ invalid ', \$2 = '''Valame Dios!'' dijo Sancho; ''no le dije yo a vuestra merced que mirase bien lo que hacia\?'''],
"parameter report does not truncate");
$log = undef;
# 3. Logging full parameters on error, truncated with statements
$node->append_conf('postgresql.conf',
"log_min_duration_statement = -1\n" .
"log_parameter_max_length = 7\n" .
"log_parameter_max_length_on_error = -1");
"log_min_duration_statement = -1\n"
. "log_parameter_max_length = 7\n"
. "log_parameter_max_length_on_error = -1");
$node->reload;
pgbench(
'-n -t1 -c1 -M prepared',
@ -377,15 +383,17 @@ select column1::jsonb from (values (:value), (:long)) as q;
]
});
$log = TestLib::slurp_file($node->logfile);
like($log, qr[DETAIL: parameters: \$1 = '\{ inval\.\.\.', \$2 = '''Valame\.\.\.'],
like(
$log,
qr[DETAIL: parameters: \$1 = '\{ inval\.\.\.', \$2 = '''Valame\.\.\.'],
"parameter report truncates");
$log = undef;
# Restore default logging config
$node->append_conf('postgresql.conf',
"log_min_duration_statement = -1\n" .
"log_parameter_max_length_on_error = 0\n" .
"log_parameter_max_length = -1");
"log_min_duration_statement = -1\n"
. "log_parameter_max_length_on_error = 0\n"
. "log_parameter_max_length = -1");
$node->reload;
# test expressions

View File

@ -147,7 +147,10 @@ my @options = (
[
'invalid init step',
'-i -I dta',
[ qr{unrecognized initialization step}, qr{Allowed step characters are} ]
[
qr{unrecognized initialization step},
qr{Allowed step characters are}
]
],
[
'bad random seed',
@ -158,8 +161,16 @@ my @options = (
qr{error while setting random seed from --random-seed option}
]
],
[ 'bad partition method', '-i --partition-method=BAD', [qr{"range"}, qr{"hash"}, qr{"BAD"}] ],
[ 'bad partition number', '-i --partitions -1', [ qr{invalid number of partitions: "-1"} ] ],
[
'bad partition method',
'-i --partition-method=BAD',
[ qr{"range"}, qr{"hash"}, qr{"BAD"} ]
],
[
'bad partition number',
'-i --partitions -1',
[qr{invalid number of partitions: "-1"}]
],
[
'partition method without partitioning',
'-i --partition-method=hash',
@ -231,8 +242,10 @@ pgbench(
'--show-script se',
0,
[qr{^$}],
[ qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE},
qr{(?!UPDATE)}, qr{(?!INSERT)} ],
[
qr{select-only: }, qr{SELECT abalance FROM pgbench_accounts WHERE},
qr{(?!UPDATE)}, qr{(?!INSERT)}
],
'pgbench builtin listing');
my @script_tests = (

View File

@ -238,11 +238,10 @@ MainLoop(FILE *source)
bool found_q = false;
/*
* The assistance words, help/exit/quit, must have no
* whitespace before them, and only whitespace after, with an
* optional semicolon. This prevents indented use of these
* words, perhaps as identifiers, from invoking the assistance
* behavior.
* The assistance words, help/exit/quit, must have no whitespace
* before them, and only whitespace after, with an optional
* semicolon. This prevents indented use of these words, perhaps
* as identifiers, from invoking the assistance behavior.
*/
if (pg_strncasecmp(first_word, "help", 4) == 0)
{

View File

@ -89,8 +89,7 @@ $node->command_fails(
$node->command_fails(
[ 'vacuumdb', '--analyze', '--table', 'vactable(c)', 'postgres' ],
'incorrect column name with ANALYZE');
$node->command_fails(
[ 'vacuumdb', '-P', -1, 'postgres' ],
$node->command_fails([ 'vacuumdb', '-P', -1, 'postgres' ],
'negative parallel degree');
$node->issues_sql_like(
[ 'vacuumdb', '--analyze', '--table', 'vactable(a, b)', 'postgres' ],

View File

@ -94,7 +94,8 @@ while (my $line = <$INPUT>)
my $nfkc_utf8 = codepoint_string_to_hex($nfkc);
my $nfkd_utf8 = codepoint_string_to_hex($nfkd);
print $OUTPUT "\t{ $linenum, { $source_utf8 }, { { $nfc_utf8 }, { $nfd_utf8 }, { $nfkc_utf8 }, { $nfkd_utf8 } } },\n";
print $OUTPUT
"\t{ $linenum, { $source_utf8 }, { { $nfc_utf8 }, { $nfd_utf8 }, { $nfkc_utf8 }, { $nfkd_utf8 } } },\n";
}
# Output terminator entry

View File

@ -14,7 +14,8 @@ my $codepoint;
my $prev_codepoint;
my $count = 0;
print "/* generated by src/common/unicode/generate-unicode_combining_table.pl, do not edit */\n\n";
print
"/* generated by src/common/unicode/generate-unicode_combining_table.pl, do not edit */\n\n";
print "static const struct mbinterval combining[] = {\n";

View File

@ -11,7 +11,8 @@ use warnings;
my %data;
print "/* generated by src/common/unicode/generate-unicode_normprops_table.pl, do not edit */\n\n";
print
"/* generated by src/common/unicode/generate-unicode_normprops_table.pl, do not edit */\n\n";
print <<EOS;
#include "common/unicode_norm.h"
@ -61,7 +62,8 @@ foreach my $prop (sort keys %data)
next if $prop eq "NFD_QC" || $prop eq "NFKD_QC";
print "\n";
print "static const pg_unicode_normprops UnicodeNormProps_${prop}[] = {\n";
print
"static const pg_unicode_normprops UnicodeNormProps_${prop}[] = {\n";
my %subdata = %{ $data{$prop} };
foreach my $cp (sort { $a <=> $b } keys %subdata)

View File

@ -112,8 +112,8 @@ get_decomposed_size(pg_wchar code, bool compat)
/*
* Fast path for Hangul characters not stored in tables to save memory as
* decomposition is algorithmic. See
* https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
* the matter.
* https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details
* on the matter.
*/
if (code >= SBASE && code < SBASE + SCOUNT)
{
@ -238,8 +238,8 @@ decompose_code(pg_wchar code, bool compat, pg_wchar **result, int *current)
/*
* Fast path for Hangul characters not stored in tables to save memory as
* decomposition is algorithmic. See
* https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details on
* the matter.
* https://www.unicode.org/reports/tr15/tr15-18.html, annex 10 for details
* on the matter.
*/
if (code >= SBASE && code < SBASE + SCOUNT)
{
@ -369,8 +369,8 @@ unicode_normalize(UnicodeNormalizationForm form, const pg_wchar *input)
continue;
/*
* Per Unicode (https://www.unicode.org/reports/tr15/tr15-18.html) annex 4,
* a sequence of two adjacent characters in a string is an
* Per Unicode (https://www.unicode.org/reports/tr15/tr15-18.html)
* annex 4, a sequence of two adjacent characters in a string is an
* exchangeable pair if the combining class (from the Unicode
* Character Database) for the first character is greater than the
* combining class for the second, and the second is not a starter. A
@ -396,10 +396,10 @@ unicode_normalize(UnicodeNormalizationForm form, const pg_wchar *input)
return decomp_chars;
/*
* The last phase of NFC and NFKC is the recomposition of the reordered Unicode
* string using combining classes. The recomposed string cannot be longer
* than the decomposed one, so make the allocation of the output string
* based on that assumption.
* The last phase of NFC and NFKC is the recomposition of the reordered
* Unicode string using combining classes. The recomposed string cannot be
* longer than the decomposed one, so make the allocation of the output
* string based on that assumption.
*/
recomp_chars = (pg_wchar *) ALLOC((decomp_size + 1) * sizeof(pg_wchar));
if (!recomp_chars)

View File

@ -482,9 +482,9 @@ typedef struct TableAmRoutine
double *tups_recently_dead);
/*
* React to VACUUM command on the relation. The VACUUM can be
* triggered by a user or by autovacuum. The specific actions
* performed by the AM will depend heavily on the individual AM.
* React to VACUUM command on the relation. The VACUUM can be triggered by
* a user or by autovacuum. The specific actions performed by the AM will
* depend heavily on the individual AM.
*
* On entry a transaction is already established, and the relation is
* locked with a ShareUpdateExclusive lock.

View File

@ -281,10 +281,13 @@ typedef struct AggStatePerPhaseData
ExprState *evaltrans; /* evaluation of transition functions */
/* cached variants of the compiled expression */
ExprState *evaltrans_cache
[2] /* 0: outerops; 1: TTSOpsMinimalTuple */
[2]; /* 0: no NULL check; 1: with NULL check */
/*----------
* Cached variants of the compiled expression.
* first subscript: 0: outerops; 1: TTSOpsMinimalTuple
* second subscript: 0: no NULL check; 1: with NULL check
*----------
*/
ExprState *evaltrans_cache[2][2];
} AggStatePerPhaseData;
/*

View File

@ -2164,20 +2164,20 @@ typedef struct AggState
MemoryContext hash_metacxt; /* memory for hash table itself */
struct HashTapeInfo *hash_tapeinfo; /* metadata for spill tapes */
struct HashAggSpill *hash_spills; /* HashAggSpill for each grouping set,
exists only during first pass */
* exists only during first pass */
TupleTableSlot *hash_spill_slot; /* slot for reading from spill files */
List *hash_batches; /* hash batches remaining to be processed */
bool hash_ever_spilled; /* ever spilled during this execution? */
bool hash_spill_mode; /* we hit a limit during the current batch
and we must not create new groups */
* and we must not create new groups */
Size hash_mem_limit; /* limit before spilling hash table */
uint64 hash_ngroups_limit; /* limit before spilling hash table */
int hash_planned_partitions; /* number of partitions planned
for first pass */
* for first pass */
double hashentrysize; /* estimate revised during execution */
Size hash_mem_peak; /* peak hash table memory usage */
uint64 hash_ngroups_current; /* number of groups currently in
memory in all hash tables */
* memory in all hash tables */
uint64 hash_disk_used; /* kB of disk space used */
int hash_batches_used; /* batches used during entire execution */

View File

@ -740,8 +740,8 @@ typedef struct RelOptInfo
/* used for partitioned relations: */
PartitionScheme part_scheme; /* Partitioning scheme */
int nparts; /* Number of partitions; -1 if not yet set;
* in case of a join relation 0 means it's
int nparts; /* Number of partitions; -1 if not yet set; in
* case of a join relation 0 means it's
* considered unpartitioned */
struct PartitionBoundInfoData *boundinfo; /* Partition bounds */
bool partbounds_merged; /* True if partition bounds were created

View File

@ -74,11 +74,11 @@ typedef struct
TimeLineID receiveStartTLI;
/*
* flushedUpto-1 is the last byte position that has already been
* received, and receivedTLI is the timeline it came from. At the first
* startup of walreceiver, these are set to receiveStart and
* receiveStartTLI. After that, walreceiver updates these whenever it
* flushes the received WAL to disk.
* flushedUpto-1 is the last byte position that has already been received,
* and receivedTLI is the timeline it came from. At the first startup of
* walreceiver, these are set to receiveStart and receiveStartTLI. After
* that, walreceiver updates these whenever it flushes the received WAL to
* disk.
*/
XLogRecPtr flushedUpto;
TimeLineID receivedTLI;

View File

@ -1039,7 +1039,8 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct tm *tm, char **tzn)
}
else
tm->tm_isdst = -1;
#else /* not (HAVE_STRUCT_TM_TM_ZONE || HAVE_INT_TIMEZONE) */
#else /* not (HAVE_STRUCT_TM_TM_ZONE ||
* HAVE_INT_TIMEZONE) */
if (tzp != NULL)
{
/* default to UTC */

View File

@ -158,7 +158,8 @@ timestamp2tm(timestamp dt, int *tzp, struct tm *tm, fsec_t *fsec, const char **t
if (tzn != NULL)
*tzn = TZNAME_GLOBAL[(tm->tm_isdst > 0)];
#endif
#else /* not (HAVE_STRUCT_TM_TM_ZONE || HAVE_INT_TIMEZONE) */
#else /* not (HAVE_STRUCT_TM_TM_ZONE ||
* HAVE_INT_TIMEZONE) */
*tzp = 0;
/* Mark this as *no* time zone available */
tm->tm_isdst = -1;

View File

@ -819,17 +819,16 @@ initialize_SSL(PGconn *conn)
}
/*
* Delegate the client cert password prompt to the libpq wrapper
* callback if any is defined.
* Delegate the client cert password prompt to the libpq wrapper callback
* if any is defined.
*
* If the application hasn't installed its own and the sslpassword
* parameter is non-null, we install ours now to make sure we
* supply PGconn->sslpassword to OpenSSL instead of letting it
* prompt on stdin.
* parameter is non-null, we install ours now to make sure we supply
* PGconn->sslpassword to OpenSSL instead of letting it prompt on stdin.
*
* This will replace OpenSSL's default PEM_def_callback (which
* prompts on stdin), but we're only setting it for this SSL
* context so it's harmless.
* This will replace OpenSSL's default PEM_def_callback (which prompts on
* stdin), but we're only setting it for this SSL context so it's
* harmless.
*/
if (PQsslKeyPassHook
|| (conn->sslpassword && strlen(conn->sslpassword) > 0))
@ -1205,14 +1204,14 @@ initialize_SSL(PGconn *conn)
/*
* We'll try to load the file in DER (binary ASN.1) format, and if
* that fails too, report the original error. This could mask
* issues where there's something wrong with a DER-format cert, but
* we'd have to duplicate openssl's format detection to be smarter
* than this. We can't just probe for a leading -----BEGIN because
* PEM can have leading non-matching lines and blanks. OpenSSL
* doesn't expose its get_name(...) and its PEM routines don't
* differentiate between failure modes in enough detail to let us
* tell the difference between "not PEM, try DER" and "wrong
* password".
* issues where there's something wrong with a DER-format cert,
* but we'd have to duplicate openssl's format detection to be
* smarter than this. We can't just probe for a leading -----BEGIN
* because PEM can have leading non-matching lines and blanks.
* OpenSSL doesn't expose its get_name(...) and its PEM routines
* don't differentiate between failure modes in enough detail to
* let us tell the difference between "not PEM, try DER" and
* "wrong password".
*/
if (SSL_use_PrivateKey_file(conn->ssl, fnbuf, SSL_FILETYPE_ASN1) != 1)
{

View File

@ -768,7 +768,10 @@ pltcl_handler(PG_FUNCTION_ARGS, bool pltrusted)
PG_FINALLY();
{
/* Restore static pointer, then clean up the prodesc refcount if any */
/* (We're being paranoid in case an error is thrown in context deletion) */
/*
* (We're being paranoid in case an error is thrown in context
* deletion)
*/
pltcl_current_call_state = save_call_state;
if (current_call_state.prodesc != NULL)
{
@ -2780,9 +2783,8 @@ pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp,
if (callObjc != qdesc->nargs)
{
Tcl_SetObjResult(interp,
Tcl_NewStringObj(
"argument list length doesn't match number of arguments for query"
,-1));
Tcl_NewStringObj("argument list length doesn't match number of arguments for query",
-1));
return TCL_ERROR;
}
}

View File

@ -12,7 +12,8 @@ use TestLib;
use Test::More;
if (!$use_unix_sockets)
{
plan skip_all => "authentication tests cannot run without Unix-domain sockets";
plan skip_all =>
"authentication tests cannot run without Unix-domain sockets";
}
else
{

View File

@ -9,7 +9,8 @@ use TestLib;
use Test::More;
if (!$use_unix_sockets)
{
plan skip_all => "authentication tests cannot run without Unix-domain sockets";
plan skip_all =>
"authentication tests cannot run without Unix-domain sockets";
}
else
{

View File

@ -31,7 +31,8 @@ relopt_parse_elt di_relopt_tab[6];
/* Kind of relation options for dummy index */
relopt_kind di_relopt_kind;
typedef enum DummyAmEnum {
typedef enum DummyAmEnum
{
DUMMY_AM_ENUM_ONE,
DUMMY_AM_ENUM_TWO
} DummyAmEnum;

View File

@ -27,8 +27,10 @@ static char *ssl_passphrase = NULL;
/* callback function */
static int rot13_passphrase(char *buf, int size, int rwflag, void *userdata);
/* hook function to set the callback */
static void set_rot13(SSL_CTX *context, bool isServerStart);
/*
* Module load callback
*/

View File

@ -703,7 +703,8 @@ port = $port
"unix_socket_directories = '$host'");
}
$self->enable_streaming($root_node) if $params{has_streaming};
$self->enable_restoring($root_node, $params{standby}) if $params{has_restoring};
$self->enable_restoring($root_node, $params{standby})
if $params{has_restoring};
return;
}

View File

@ -1,3 +1,4 @@
=pod
=head1 NAME
@ -122,7 +123,8 @@ BEGIN
# Specifies whether to use Unix sockets for test setups. On
# Windows we don't use them by default since it's not universally
# supported, but it can be overridden if desired.
$use_unix_sockets = (!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS});
$use_unix_sockets =
(!$windows_os || defined $ENV{PG_TEST_USE_UNIX_SOCKETS});
}
=pod

View File

@ -348,8 +348,7 @@ is($catalog_xmin, '',
'catalog xmin of cascaded slot still null with hs_feedback reset');
note "check change primary_conninfo without restart";
$node_standby_2->append_conf('postgresql.conf',
"primary_slot_name = ''");
$node_standby_2->append_conf('postgresql.conf', "primary_slot_name = ''");
$node_standby_2->enable_streaming($node_master);
$node_standby_2->reload;
@ -370,22 +369,26 @@ is($is_replayed, qq(1), "standby_2 didn't replay master value $newval");
my $phys_slot = 'phys_slot';
$node_master->safe_psql('postgres',
"SELECT pg_create_physical_replication_slot('$phys_slot', true);");
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
CREATE TABLE tab_phys_slot (a int);
INSERT INTO tab_phys_slot VALUES (generate_series(1,10));");
my $current_lsn = $node_master->safe_psql('postgres',
"SELECT pg_current_wal_lsn();");
my $current_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
chomp($current_lsn);
my $psql_rc = $node_master->psql('postgres',
"SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);");
"SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);"
);
is($psql_rc, '0', 'slot advancing with physical slot');
my $phys_restart_lsn_pre = $node_master->safe_psql('postgres',
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';");
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
);
chomp($phys_restart_lsn_pre);
# Slot advance should persist across clean restarts.
$node_master->restart;
my $phys_restart_lsn_post = $node_master->safe_psql('postgres',
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';");
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
);
chomp($phys_restart_lsn_post);
ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0,
"physical slot advance persists across restarts");

View File

@ -150,13 +150,18 @@ ok($logfile =~ qr/multiple recovery targets specified/,
# Check behavior when recovery ends before target is reached
$node_standby = get_new_node('standby_8');
$node_standby->init_from_backup($node_master, 'my_backup',
has_restoring => 1, standby => 0);
$node_standby->init_from_backup(
$node_master, 'my_backup',
has_restoring => 1,
standby => 0);
$node_standby->append_conf('postgresql.conf',
"recovery_target_name = 'does_not_exist'");
run_log(['pg_ctl', '-D', $node_standby->data_dir,
'-l', $node_standby->logfile, 'start']);
run_log(
[
'pg_ctl', '-D', $node_standby->data_dir, '-l',
$node_standby->logfile, 'start'
]);
# wait up to 180s for postgres to terminate
foreach my $i (0 .. 1800)
@ -165,5 +170,6 @@ foreach my $i (0..1800)
usleep(100_000);
}
$logfile = slurp_file($node_standby->logfile());
ok($logfile =~ qr/FATAL: recovery ended before configured recovery target was reached/,
ok( $logfile =~
qr/FATAL: recovery ended before configured recovery target was reached/,
'recovery end before target reached is a fatal error');

Some files were not shown because too many files have changed in this diff Show More