pgindent run for 9.6

Robert Haas 2016-06-09 18:02:36 -04:00
parent 9164deea2f
commit 4bc424b968
252 changed files with 2670 additions and 2558 deletions
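For context: pgindent is PostgreSQL's automated source re-indentation tool (conventionally found at src/tools/pgindent in the source tree), and the hunks below are the purely mechanical layout changes it produced for the 9.6 cycle: comment re-flowing, argument re-alignment, and pointer-declaration spacing. As a quick illustration, a representative before/after pair (taken from the bloom-index prototypes further down; the types come from contrib/bloom, and nothing but whitespace changes) looks like:

/* Before pgindent: stray space after '*' in a pointer parameter */
extern void initBloomState(BloomState * state, Relation index);

/* After pgindent: canonical PostgreSQL pointer style */
extern void initBloomState(BloomState *state, Relation index);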


@ -165,16 +165,16 @@ _PG_init(void)
DefineCustomRealVariable("auto_explain.sample_rate",
"Fraction of queries to process.",
NULL,
&auto_explain_sample_rate,
1.0,
0.0,
1.0,
PGC_SUSET,
0,
NULL,
NULL,
NULL);
NULL,
&auto_explain_sample_rate,
1.0,
0.0,
1.0,
PGC_SUSET,
0,
NULL,
NULL,
NULL);
EmitWarningsOnPlaceholders("auto_explain");
@ -209,12 +209,12 @@ static void
explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
/*
* For rate sampling, randomly choose top-level statement. Either
* all nested statements will be explained or none will.
* For rate sampling, randomly choose top-level statement. Either all
* nested statements will be explained or none will.
*/
if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
current_query_sampled = (random() < auto_explain_sample_rate *
MAX_RANDOM_VALUE);
MAX_RANDOM_VALUE);
if (auto_explain_enabled() && current_query_sampled)
{


@ -33,11 +33,11 @@ PG_MODULE_MAGIC;
typedef struct
{
BloomState blstate; /* bloom index state */
MemoryContext tmpCtx; /* temporary memory context reset after
* each tuple */
MemoryContext tmpCtx; /* temporary memory context reset after each
* tuple */
char data[BLCKSZ]; /* cached page */
int64 count; /* number of tuples in cached page */
} BloomBuildState;
} BloomBuildState;
/*
* Flush page cached in BloomBuildState.
@ -140,8 +140,8 @@ blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
bloomBuildCallback, (void *) &buildstate);
/*
* There are could be some items in cached page. Flush this page
* if needed.
* There are could be some items in cached page. Flush this page if
* needed.
*/
if (buildstate.count > 0)
flushCachedPage(index, &buildstate);


@ -31,14 +31,13 @@
/* Opaque for bloom pages */
typedef struct BloomPageOpaqueData
{
OffsetNumber maxoff; /* number of index tuples on page */
uint16 flags; /* see bit definitions below */
uint16 unused; /* placeholder to force maxaligning of size
* of BloomPageOpaqueData and to place
* bloom_page_id exactly at the end of page
*/
uint16 bloom_page_id; /* for identification of BLOOM indexes */
} BloomPageOpaqueData;
OffsetNumber maxoff; /* number of index tuples on page */
uint16 flags; /* see bit definitions below */
uint16 unused; /* placeholder to force maxaligning of size of
* BloomPageOpaqueData and to place
* bloom_page_id exactly at the end of page */
uint16 bloom_page_id; /* for identification of BLOOM indexes */
} BloomPageOpaqueData;
typedef BloomPageOpaqueData *BloomPageOpaque;
@ -102,9 +101,9 @@ typedef struct BloomOptions
{
int32 vl_len_; /* varlena header (do not touch directly!) */
int bloomLength; /* length of signature in words (not bits!) */
int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for each
* index key */
} BloomOptions;
int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for
* each index key */
} BloomOptions;
/*
* FreeBlockNumberArray - array of block numbers sized so that metadata fill
@ -125,7 +124,7 @@ typedef struct BloomMetaPageData
uint16 nEnd;
BloomOptions opts;
FreeBlockNumberArray notFullPage;
} BloomMetaPageData;
} BloomMetaPageData;
/* Magic number to distinguish bloom pages among anothers */
#define BLOOM_MAGICK_NUMBER (0xDBAC0DED)
@ -146,7 +145,7 @@ typedef struct BloomState
* precompute it
*/
Size sizeOfBloomTuple;
} BloomState;
} BloomState;
#define BloomPageGetFreeSpace(state, page) \
(BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \
@ -160,30 +159,30 @@ typedef struct BloomTuple
{
ItemPointerData heapPtr;
BloomSignatureWord sign[FLEXIBLE_ARRAY_MEMBER];
} BloomTuple;
} BloomTuple;
#define BLOOMTUPLEHDRSZ offsetof(BloomTuple, sign)
/* Opaque data structure for bloom index scan */
typedef struct BloomScanOpaqueData
{
BloomSignatureWord *sign; /* Scan signature */
BloomSignatureWord *sign; /* Scan signature */
BloomState state;
} BloomScanOpaqueData;
} BloomScanOpaqueData;
typedef BloomScanOpaqueData *BloomScanOpaque;
/* blutils.c */
extern void _PG_init(void);
extern Datum blhandler(PG_FUNCTION_ARGS);
extern void initBloomState(BloomState * state, Relation index);
extern void initBloomState(BloomState *state, Relation index);
extern void BloomFillMetapage(Relation index, Page metaPage);
extern void BloomInitMetapage(Relation index);
extern void BloomInitPage(Page page, uint16 flags);
extern Buffer BloomNewBuffer(Relation index);
extern void signValue(BloomState * state, BloomSignatureWord * sign, Datum value, int attno);
extern BloomTuple *BloomFormTuple(BloomState * state, ItemPointer iptr, Datum *values, bool *isnull);
extern bool BloomPageAddItem(BloomState * state, Page page, BloomTuple * tuple);
extern void signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno);
extern BloomTuple *BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull);
extern bool BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple);
/* blvalidate.c */
extern bool blvalidate(Oid opclassoid);


@ -37,6 +37,7 @@ PG_FUNCTION_INFO_V1(blhandler);
/* Kind of relation options for bloom index */
static relopt_kind bl_relopt_kind;
/* parse table for fillRelOptions */
static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
@ -215,7 +216,9 @@ myRand(void)
* October 1988, p. 1195.
*----------
*/
int32 hi, lo, x;
int32 hi,
lo,
x;
/* Must be in [1, 0x7ffffffe] range at this point. */
hi = next / 127773;


@ -78,7 +78,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/* Iterate over the tuples */
itup = itupPtr = BloomPageGetTuple(&state, page, FirstOffsetNumber);
itupEnd = BloomPageGetTuple(&state, page,
OffsetNumberNext(BloomPageGetMaxOffset(page)));
OffsetNumberNext(BloomPageGetMaxOffset(page)));
while (itup < itupEnd)
{
/* Do we have to delete this tuple? */
@ -106,11 +106,11 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
}
Assert(itupPtr == BloomPageGetTuple(&state, page,
OffsetNumberNext(BloomPageGetMaxOffset(page))));
OffsetNumberNext(BloomPageGetMaxOffset(page))));
/*
* Add page to notFullPage list if we will not mark page as deleted and
* there is a free space on it
* Add page to notFullPage list if we will not mark page as deleted
* and there is a free space on it
*/
if (BloomPageGetMaxOffset(page) != 0 &&
BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple &&


@ -132,7 +132,7 @@ static bool fileAnalyzeForeignTable(Relation relation,
AcquireSampleRowsFunc *func,
BlockNumber *totalpages);
static bool fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
RangeTblEntry *rte);
/*
* Helper functions
@ -767,12 +767,12 @@ fileAnalyzeForeignTable(Relation relation,
/*
* fileIsForeignScanParallelSafe
* Reading a file in a parallel worker should work just the same as
* reading it in the leader, so mark scans safe.
* Reading a file in a parallel worker should work just the same as
* reading it in the leader, so mark scans safe.
*/
static bool
fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte)
RangeTblEntry *rte)
{
return true;
}


@ -444,9 +444,9 @@ ean2ISBN(char *isn)
unsigned check;
/*
* The number should come in this format: 978-0-000-00000-0
* or may be an ISBN-13 number, 979-..., which does not have a short
* representation. Do the short output version if possible.
* The number should come in this format: 978-0-000-00000-0 or may be an
* ISBN-13 number, 979-..., which does not have a short representation. Do
* the short output version if possible.
*/
if (strncmp("978-", isn, 4) == 0)
{


@ -82,7 +82,7 @@ text_to_bits(char *str, int len)
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("illegal character '%c' in t_bits string", str[off])));
errmsg("illegal character '%c' in t_bits string", str[off])));
if (off % 8 == 7)
bits[off / 8] = byte;
@ -192,9 +192,9 @@ heap_page_items(PG_FUNCTION_ARGS)
lp_offset == MAXALIGN(lp_offset) &&
lp_offset + lp_len <= raw_page_size)
{
HeapTupleHeader tuphdr;
bytea *tuple_data_bytea;
int tuple_data_len;
HeapTupleHeader tuphdr;
bytea *tuple_data_bytea;
int tuple_data_len;
/* Extract information from the tuple header */
@ -214,7 +214,7 @@ heap_page_items(PG_FUNCTION_ARGS)
tuple_data_bytea = (bytea *) palloc(tuple_data_len + VARHDRSZ);
SET_VARSIZE(tuple_data_bytea, tuple_data_len + VARHDRSZ);
memcpy(VARDATA(tuple_data_bytea), (char *) tuphdr + tuphdr->t_hoff,
tuple_data_len);
tuple_data_len);
values[13] = PointerGetDatum(tuple_data_bytea);
/*
@ -284,16 +284,16 @@ heap_page_items(PG_FUNCTION_ARGS)
*/
static Datum
tuple_data_split_internal(Oid relid, char *tupdata,
uint16 tupdata_len, uint16 t_infomask,
uint16 t_infomask2, bits8 *t_bits,
bool do_detoast)
uint16 tupdata_len, uint16 t_infomask,
uint16 t_infomask2, bits8 *t_bits,
bool do_detoast)
{
ArrayBuildState *raw_attrs;
int nattrs;
int i;
int off = 0;
Relation rel;
TupleDesc tupdesc;
ArrayBuildState *raw_attrs;
int nattrs;
int i;
int off = 0;
Relation rel;
TupleDesc tupdesc;
/* Get tuple descriptor from relation OID */
rel = relation_open(relid, NoLock);
@ -310,30 +310,31 @@ tuple_data_split_internal(Oid relid, char *tupdata,
for (i = 0; i < nattrs; i++)
{
Form_pg_attribute attr;
bool is_null;
bytea *attr_data = NULL;
Form_pg_attribute attr;
bool is_null;
bytea *attr_data = NULL;
attr = tupdesc->attrs[i];
is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);
/*
* Tuple header can specify less attributes than tuple descriptor
* as ALTER TABLE ADD COLUMN without DEFAULT keyword does not
* actually change tuples in pages, so attributes with numbers greater
* than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
* Tuple header can specify less attributes than tuple descriptor as
* ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually
* change tuples in pages, so attributes with numbers greater than
* (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
*/
if (i >= (t_infomask2 & HEAP_NATTS_MASK))
is_null = true;
if (!is_null)
{
int len;
int len;
if (attr->attlen == -1)
{
off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1,
tupdata + off);
/*
* As VARSIZE_ANY throws an exception if it can't properly
* detect the type of external storage in macros VARTAG_SIZE,
@ -343,8 +344,8 @@ tuple_data_split_internal(Oid relid, char *tupdata,
!VARATT_IS_EXTERNAL_ONDISK(tupdata + off) &&
!VARATT_IS_EXTERNAL_INDIRECT(tupdata + off))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("first byte of varlena attribute is incorrect for attribute %d", i)));
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("first byte of varlena attribute is incorrect for attribute %d", i)));
len = VARSIZE_ANY(tupdata + off);
}
@ -381,7 +382,7 @@ tuple_data_split_internal(Oid relid, char *tupdata,
if (tupdata_len != off)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("end of tuple reached without looking at all its data")));
errmsg("end of tuple reached without looking at all its data")));
return makeArrayResult(raw_attrs, CurrentMemoryContext);
}
@ -397,14 +398,14 @@ PG_FUNCTION_INFO_V1(tuple_data_split);
Datum
tuple_data_split(PG_FUNCTION_ARGS)
{
Oid relid;
bytea *raw_data;
uint16 t_infomask;
uint16 t_infomask2;
char *t_bits_str;
bool do_detoast = false;
bits8 *t_bits = NULL;
Datum res;
Oid relid;
bytea *raw_data;
uint16 t_infomask;
uint16 t_infomask2;
char *t_bits_str;
bool do_detoast = false;
bits8 *t_bits = NULL;
Datum res;
relid = PG_GETARG_OID(0);
raw_data = PG_ARGISNULL(1) ? NULL : PG_GETARG_BYTEA_P(1);
@ -430,8 +431,8 @@ tuple_data_split(PG_FUNCTION_ARGS)
*/
if (t_infomask & HEAP_HASNULL)
{
int bits_str_len;
int bits_len;
int bits_str_len;
int bits_len;
bits_len = (t_infomask2 & HEAP_NATTS_MASK) / 8 + 1;
if (!t_bits_str)


@ -265,13 +265,13 @@ gin_trgm_consistent(PG_FUNCTION_ARGS)
Datum
gin_trgm_triconsistent(PG_FUNCTION_ARGS)
{
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
/* text *query = PG_GETARG_TEXT_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4);
GinTernaryValue res = GIN_MAYBE;
GinTernaryValue res = GIN_MAYBE;
int32 i,
ntrue;
bool *boolcheck;
@ -293,11 +293,12 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
}
/*
* See comment in gin_trgm_consistent() about * upper bound formula
* See comment in gin_trgm_consistent() about * upper bound
* formula
*/
res = (nkeys == 0)
? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
? GIN_MAYBE : GIN_FALSE);
? GIN_MAYBE : GIN_FALSE);
break;
case ILikeStrategyNumber:
#ifndef IGNORECASE
@ -330,9 +331,9 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
else
{
/*
* As trigramsMatchGraph implements a monotonic boolean function,
* promoting all GIN_MAYBE keys to GIN_TRUE will give a
* conservative result.
* As trigramsMatchGraph implements a monotonic boolean
* function, promoting all GIN_MAYBE keys to GIN_TRUE will
* give a conservative result.
*/
boolcheck = (bool *) palloc(sizeof(bool) * nkeys);
for (i = 0; i < nkeys; i++)
@ -345,7 +346,7 @@ gin_trgm_triconsistent(PG_FUNCTION_ARGS)
break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
res = GIN_FALSE; /* keep compiler quiet */
res = GIN_FALSE; /* keep compiler quiet */
break;
}


@ -296,6 +296,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
if (GIST_LEAF(entry))
{ /* all leafs contains orig trgm */
/*
* Prevent gcc optimizing the tmpsml variable using volatile
* keyword. Otherwise comparison of nlimit and tmpsml may give
@ -476,12 +477,14 @@ gtrgm_distance(PG_FUNCTION_ARGS)
*recheck = strategy == WordDistanceStrategyNumber;
if (GIST_LEAF(entry))
{ /* all leafs contains orig trgm */
/*
* Prevent gcc optimizing the sml variable using volatile
* keyword. Otherwise res can differ from the
* word_similarity_dist_op() function.
*/
float4 volatile sml = cnt_sml(qtrg, key, *recheck);
res = 1.0 - sml;
}
else if (ISALLTRUE(key))


@ -16,8 +16,8 @@
PG_MODULE_MAGIC;
/* GUC variables */
double similarity_threshold = 0.3f;
double word_similarity_threshold = 0.6f;
double similarity_threshold = 0.3f;
double word_similarity_threshold = 0.6f;
void _PG_init(void);
@ -36,8 +36,8 @@ PG_FUNCTION_INFO_V1(word_similarity_dist_commutator_op);
/* Trigram with position */
typedef struct
{
trgm trg;
int index;
trgm trg;
int index;
} pos_trgm;
/*
@ -48,29 +48,29 @@ _PG_init(void)
{
/* Define custom GUC variables. */
DefineCustomRealVariable("pg_trgm.similarity_threshold",
"Sets the threshold used by the %% operator.",
"Valid range is 0.0 .. 1.0.",
&similarity_threshold,
0.3,
0.0,
1.0,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
"Sets the threshold used by the %% operator.",
"Valid range is 0.0 .. 1.0.",
&similarity_threshold,
0.3,
0.0,
1.0,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
DefineCustomRealVariable("pg_trgm.word_similarity_threshold",
"Sets the threshold used by the <%% operator.",
"Valid range is 0.0 .. 1.0.",
&word_similarity_threshold,
0.6,
0.0,
1.0,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
"Sets the threshold used by the <%% operator.",
"Valid range is 0.0 .. 1.0.",
&word_similarity_threshold,
0.6,
0.0,
1.0,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
}
/*
@ -352,9 +352,9 @@ generate_trgm(char *str, int slen)
* Make array of positional trigrams from two trigram arrays trg1 and trg2.
*
* trg1: trigram array of search pattern, of length len1. trg1 is required
* word which positions don't matter and replaced with -1.
* word which positions don't matter and replaced with -1.
* trg2: trigram array of text, of length len2. trg2 is haystack where we
* search and have to store its positions.
* search and have to store its positions.
*
* Returns concatenated trigram array.
*/
@ -362,7 +362,8 @@ static pos_trgm *
make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
{
pos_trgm *result;
int i, len = len1 + len2;
int i,
len = len1 + len2;
result = (pos_trgm *) palloc(sizeof(pos_trgm) * len);
@ -387,9 +388,9 @@ make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
static int
comp_ptrgm(const void *v1, const void *v2)
{
const pos_trgm *p1 = (const pos_trgm *)v1;
const pos_trgm *p2 = (const pos_trgm *)v2;
int cmp;
const pos_trgm *p1 = (const pos_trgm *) v1;
const pos_trgm *p2 = (const pos_trgm *) v2;
int cmp;
cmp = CMPTRGM(p1->trg, p2->trg);
if (cmp != 0)
@ -413,7 +414,7 @@ comp_ptrgm(const void *v1, const void *v2)
* len2: length of array "trg2" and array "trg2indexes".
* len: length of the array "found".
* check_only: if true then only check existaince of similar search pattern in
* text.
* text.
*
* Returns word similarity.
*/
@ -441,7 +442,7 @@ iterate_word_similarity(int *trg2indexes,
for (i = 0; i < len2; i++)
{
/* Get index of next trigram */
int trgindex = trg2indexes[i];
int trgindex = trg2indexes[i];
/* Update last position of this trigram */
if (lower >= 0 || found[trgindex])
@ -458,10 +459,10 @@ iterate_word_similarity(int *trg2indexes,
/* Adjust lower bound if this trigram is present in required substing */
if (found[trgindex])
{
int prev_lower,
tmp_ulen2,
tmp_lower,
tmp_count;
int prev_lower,
tmp_ulen2,
tmp_lower,
tmp_count;
upper = i;
if (lower == -1)
@ -478,8 +479,8 @@ iterate_word_similarity(int *trg2indexes,
prev_lower = lower;
for (tmp_lower = lower; tmp_lower <= upper; tmp_lower++)
{
float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2);
int tmp_trgindex;
float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2);
int tmp_trgindex;
if (smlr_tmp > smlr_cur)
{
@ -488,10 +489,11 @@ iterate_word_similarity(int *trg2indexes,
lower = tmp_lower;
count = tmp_count;
}
/*
* if we only check that word similarity is greater than
* pg_trgm.word_similarity_threshold we do not need to calculate
* a maximum similarity.
* pg_trgm.word_similarity_threshold we do not need to
* calculate a maximum similarity.
*/
if (check_only && smlr_cur >= word_similarity_threshold)
break;
@ -506,6 +508,7 @@ iterate_word_similarity(int *trg2indexes,
}
smlr_max = Max(smlr_max, smlr_cur);
/*
* if we only check that word similarity is greater than
* pg_trgm.word_similarity_threshold we do not need to calculate a
@ -516,7 +519,8 @@ iterate_word_similarity(int *trg2indexes,
for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++)
{
int tmp_trgindex;
int tmp_trgindex;
tmp_trgindex = trg2indexes[tmp_lower];
if (lastpos[tmp_trgindex] == tmp_lower)
lastpos[tmp_trgindex] = -1;
@ -544,13 +548,13 @@ iterate_word_similarity(int *trg2indexes,
* str1: search pattern string, of length slen1 bytes.
* str2: text in which we are looking for a word, of length slen2 bytes.
* check_only: if true then only check existaince of similar search pattern in
* text.
* text.
*
* Returns word similarity.
*/
static float4
calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
bool check_only)
bool check_only)
{
bool *found;
pos_trgm *ptrg;
@ -568,8 +572,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
protect_out_of_mem(slen1 + slen2);
/* Make positional trigrams */
trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) *3);
trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) *3);
len1 = generate_trgm_only(trg1, str1, slen1);
len2 = generate_trgm_only(trg2, str2, slen2);
@ -594,7 +598,8 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
{
if (i > 0)
{
int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
if (cmp != 0)
{
if (found[j])
@ -617,7 +622,7 @@ calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
/* Run iterative procedure to find maximum similarity with word */
result = iterate_word_similarity(trg2indexes, found, ulen1, len2, len,
check_only);
check_only);
pfree(trg2indexes);
pfree(found);
@ -1075,8 +1080,8 @@ word_similarity(PG_FUNCTION_ARGS)
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
false);
VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
@ -1111,8 +1116,8 @@ word_similarity_op(PG_FUNCTION_ARGS)
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
true);
VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
true);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
@ -1127,8 +1132,8 @@ word_similarity_commutator_op(PG_FUNCTION_ARGS)
float4 res;
res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
true);
VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
true);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
@ -1143,8 +1148,8 @@ word_similarity_dist_op(PG_FUNCTION_ARGS)
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
false);
VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
@ -1159,8 +1164,8 @@ word_similarity_dist_commutator_op(PG_FUNCTION_ARGS)
float4 res;
res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
false);
VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);


@ -20,8 +20,8 @@ PG_MODULE_MAGIC;
typedef struct vbits
{
BlockNumber next;
BlockNumber count;
BlockNumber next;
BlockNumber count;
uint8 bits[FLEXIBLE_ARRAY_MEMBER];
} vbits;
@ -129,7 +129,7 @@ pg_visibility_map_rel(PG_FUNCTION_ARGS)
if (SRF_IS_FIRSTCALL())
{
Oid relid = PG_GETARG_OID(0);
MemoryContext oldcontext;
MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@ -173,7 +173,7 @@ pg_visibility_rel(PG_FUNCTION_ARGS)
if (SRF_IS_FIRSTCALL())
{
Oid relid = PG_GETARG_OID(0);
MemoryContext oldcontext;
MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@ -214,8 +214,8 @@ pg_visibility_map_summary(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
Relation rel;
BlockNumber nblocks;
BlockNumber blkno;
BlockNumber nblocks;
BlockNumber blkno;
Buffer vmbuffer = InvalidBuffer;
int64 all_visible = 0;
int64 all_frozen = 0;
@ -292,16 +292,16 @@ static vbits *
collect_visibility_data(Oid relid, bool include_pd)
{
Relation rel;
BlockNumber nblocks;
BlockNumber nblocks;
vbits *info;
BlockNumber blkno;
BlockNumber blkno;
Buffer vmbuffer = InvalidBuffer;
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
rel = relation_open(relid, AccessShareLock);
nblocks = RelationGetNumberOfBlocks(rel);
info = palloc0(offsetof(vbits, bits) + nblocks);
info = palloc0(offsetof(vbits, bits) +nblocks);
info->next = 0;
info->count = nblocks;
@ -320,8 +320,8 @@ collect_visibility_data(Oid relid, bool include_pd)
info->bits[blkno] |= (1 << 1);
/*
* Page-level data requires reading every block, so only get it if
* the caller needs it. Use a buffer access strategy, too, to prevent
* Page-level data requires reading every block, so only get it if the
* caller needs it. Use a buffer access strategy, too, to prevent
* cache-trashing.
*/
if (include_pd)


@ -124,7 +124,7 @@ struct PGP_S2K
uint8 mode;
uint8 digest_algo;
uint8 salt[8];
uint8 iter; /* encoded (one-octet) count */
uint8 iter; /* encoded (one-octet) count */
/* calculated: */
uint8 key[PGP_MAX_KEY];
uint8 key_len;


@ -486,11 +486,11 @@ pgfdw_get_result(PGconn *conn, const char *query)
for (;;)
{
PGresult *res;
PGresult *res;
while (PQisBusy(conn))
{
int wc;
int wc;
/* Sleep until there's something to do */
wc = WaitLatchOrSocket(MyLatch,
@ -675,9 +675,9 @@ pgfdw_xact_callback(XactEvent event, void *arg)
/*
* If a command has been submitted to the remote server by
* using an asynchronous execution function, the command
* might not have yet completed. Check to see if a command
* is still being processed by the remote server, and if so,
* request cancellation of the command.
* might not have yet completed. Check to see if a
* command is still being processed by the remote server,
* and if so, request cancellation of the command.
*/
if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
{
@ -689,8 +689,8 @@ pgfdw_xact_callback(XactEvent event, void *arg)
if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
ereport(WARNING,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("could not send cancel request: %s",
errbuf)));
errmsg("could not send cancel request: %s",
errbuf)));
PQfreeCancel(cancel);
}
}
@ -798,11 +798,11 @@ pgfdw_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
entry->have_error = true;
/*
* If a command has been submitted to the remote server by using an
* asynchronous execution function, the command might not have yet
* completed. Check to see if a command is still being processed by
* the remote server, and if so, request cancellation of the
* command.
* If a command has been submitted to the remote server by using
* an asynchronous execution function, the command might not have
* yet completed. Check to see if a command is still being
* processed by the remote server, and if so, request cancellation
* of the command.
*/
if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
{


@ -1583,10 +1583,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
/*
* All other system attributes are fetched as 0, except for table OID,
* which is fetched as the local table OID. However, we must be
* careful; the table could be beneath an outer join, in which case
* it must go to NULL whenever the rest of the row does.
* careful; the table could be beneath an outer join, in which case it
* must go to NULL whenever the rest of the row does.
*/
Oid fetchval = 0;
Oid fetchval = 0;
if (varattno == TableOidAttributeNumber)
{
@ -1633,10 +1633,10 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
0 - FirstLowInvalidHeapAttributeNumber);
/*
* In case the whole-row reference is under an outer join then it has to
* go NULL whenver the rest of the row goes NULL. Deparsing a join query
* would always involve multiple relations, thus qualify_col would be
* true.
* In case the whole-row reference is under an outer join then it has
* to go NULL whenver the rest of the row goes NULL. Deparsing a join
* query would always involve multiple relations, thus qualify_col
* would be true.
*/
if (qualify_col)
{
@ -1652,7 +1652,7 @@ deparseColumnRef(StringInfo buf, int varno, int varattno, PlannerInfo *root,
/* Complete the CASE WHEN statement started above. */
if (qualify_col)
appendStringInfo(buf," END");
appendStringInfo(buf, " END");
heap_close(rel, NoLock);
bms_free(attrs_used);


@ -133,9 +133,9 @@ postgres_fdw_validator(PG_FUNCTION_ARGS)
}
else if (strcmp(def->defname, "fetch_size") == 0)
{
int fetch_size;
int fetch_size;
fetch_size = strtol(defGetString(def), NULL,10);
fetch_size = strtol(defGetString(def), NULL, 10);
if (fetch_size <= 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),


@ -4063,19 +4063,20 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
/*
* Pull the other remote conditions from the joining relations into join
* clauses or other remote clauses (remote_conds) of this relation wherever
* possible. This avoids building subqueries at every join step, which is
* not currently supported by the deparser logic.
* clauses or other remote clauses (remote_conds) of this relation
* wherever possible. This avoids building subqueries at every join step,
* which is not currently supported by the deparser logic.
*
* For an inner join, clauses from both the relations are added to the
* other remote clauses. For LEFT and RIGHT OUTER join, the clauses from the
* outer side are added to remote_conds since those can be evaluated after
* the join is evaluated. The clauses from inner side are added to the
* joinclauses, since they need to evaluated while constructing the join.
* other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
* the outer side are added to remote_conds since those can be evaluated
* after the join is evaluated. The clauses from inner side are added to
* the joinclauses, since they need to evaluated while constructing the
* join.
*
* For a FULL OUTER JOIN, the other clauses from either relation can not be
* added to the joinclauses or remote_conds, since each relation acts as an
* outer relation for the other. Consider such full outer join as
* For a FULL OUTER JOIN, the other clauses from either relation can not
* be added to the joinclauses or remote_conds, since each relation acts
* as an outer relation for the other. Consider such full outer join as
* unshippable because of the reasons mentioned above in this comment.
*
* The joining sides can not have local conditions, thus no need to test


@ -78,7 +78,7 @@ typedef struct PgFdwRelationInfo
ForeignServer *server;
UserMapping *user; /* only set in use_remote_estimate mode */
int fetch_size; /* fetch size for this remote table */
int fetch_size; /* fetch size for this remote table */
/*
* Name of the relation while EXPLAINing ForeignScan. It is used for join
@ -133,23 +133,23 @@ extern void deparseUpdateSql(StringInfo buf, PlannerInfo *root,
List *targetAttrs, List *returningList,
List **retrieved_attrs);
extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
List *targetlist,
List *targetAttrs,
List *remote_conds,
List **params_list,
List *returningList,
List **retrieved_attrs);
Index rtindex, Relation rel,
List *targetlist,
List *targetAttrs,
List *remote_conds,
List **params_list,
List *returningList,
List **retrieved_attrs);
extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
List *returningList,
List **retrieved_attrs);
extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
List *remote_conds,
List **params_list,
List *returningList,
List **retrieved_attrs);
Index rtindex, Relation rel,
List *remote_conds,
List **params_list,
List *returningList,
List **retrieved_attrs);
extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
List **retrieved_attrs);


@ -494,8 +494,8 @@ ssl_extension_info(PG_FUNCTION_ARGS)
if (nid == NID_undef)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unknown OpenSSL extension in certificate at position %d",
call_cntr)));
errmsg("unknown OpenSSL extension in certificate at position %d",
call_cntr)));
values[0] = CStringGetTextDatum(OBJ_nid2sn(nid));
nulls[0] = false;


@ -65,9 +65,9 @@ static void pg_decode_change(LogicalDecodingContext *ctx,
static bool pg_decode_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);
static void pg_decode_message(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, XLogRecPtr message_lsn,
bool transactional, const char *prefix,
Size sz, const char *message);
ReorderBufferTXN *txn, XLogRecPtr message_lsn,
bool transactional, const char *prefix,
Size sz, const char *message);
void
_PG_init(void)


@ -47,7 +47,7 @@ brin_xlog_insert_update(XLogReaderState *record,
{
XLogRecPtr lsn = record->EndRecPtr;
Buffer buffer;
BlockNumber regpgno;
BlockNumber regpgno;
Page page;
XLogRedoAction action;


@ -101,7 +101,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs table pages only to this percentage",
RELOPT_KIND_HEAP,
ShareUpdateExclusiveLock /* since it applies only to later inserts */
ShareUpdateExclusiveLock /* since it applies only to later
* inserts */
},
HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
},
@ -110,7 +111,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs btree index pages only to this percentage",
RELOPT_KIND_BTREE,
ShareUpdateExclusiveLock /* since it applies only to later inserts */
ShareUpdateExclusiveLock /* since it applies only to later
* inserts */
},
BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
},
@ -119,7 +121,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs hash index pages only to this percentage",
RELOPT_KIND_HASH,
ShareUpdateExclusiveLock /* since it applies only to later inserts */
ShareUpdateExclusiveLock /* since it applies only to later
* inserts */
},
HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
},
@ -128,7 +131,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs gist index pages only to this percentage",
RELOPT_KIND_GIST,
ShareUpdateExclusiveLock /* since it applies only to later inserts */
ShareUpdateExclusiveLock /* since it applies only to later
* inserts */
},
GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
},
@ -137,7 +141,8 @@ static relopt_int intRelOpts[] =
"fillfactor",
"Packs spgist index pages only to this percentage",
RELOPT_KIND_SPGIST,
ShareUpdateExclusiveLock /* since it applies only to later inserts */
ShareUpdateExclusiveLock /* since it applies only to later
* inserts */
},
SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
},
@ -1475,8 +1480,8 @@ tablespace_reloptions(Datum reloptions, bool validate)
LOCKMODE
AlterTableGetRelOptionsLockLevel(List *defList)
{
LOCKMODE lockmode = NoLock;
ListCell *cell;
LOCKMODE lockmode = NoLock;
ListCell *cell;
if (defList == NIL)
return AccessExclusiveLock;
@ -1486,8 +1491,8 @@ AlterTableGetRelOptionsLockLevel(List *defList)
foreach(cell, defList)
{
DefElem *def = (DefElem *) lfirst(cell);
int i;
DefElem *def = (DefElem *) lfirst(cell);
int i;
for (i = 0; relOpts[i]; i++)
{


@ -524,7 +524,7 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
int64 nDeletedHeapTuples = 0;
ginxlogDeleteListPages data;
Buffer buffers[GIN_NDELETE_AT_ONCE];
BlockNumber freespace[GIN_NDELETE_AT_ONCE];
BlockNumber freespace[GIN_NDELETE_AT_ONCE];
data.ndeleted = 0;
while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
@ -745,30 +745,29 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
bool inVacuum = (stats == NULL);
/*
* We would like to prevent concurrent cleanup process. For
* that we will lock metapage in exclusive mode using LockPage()
* call. Nobody other will use that lock for metapage, so
* we keep possibility of concurrent insertion into pending list
* We would like to prevent concurrent cleanup process. For that we will
* lock metapage in exclusive mode using LockPage() call. Nobody other
* will use that lock for metapage, so we keep possibility of concurrent
* insertion into pending list
*/
if (inVacuum)
{
/*
* We are called from [auto]vacuum/analyze or
* gin_clean_pending_list() and we would like to wait
* concurrent cleanup to finish.
* We are called from [auto]vacuum/analyze or gin_clean_pending_list()
* and we would like to wait concurrent cleanup to finish.
*/
LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
workMemory =
(IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ?
autovacuum_work_mem : maintenance_work_mem;
autovacuum_work_mem : maintenance_work_mem;
}
else
{
/*
* We are called from regular insert and if we see
* concurrent cleanup just exit in hope that concurrent
* process will clean up pending list.
* We are called from regular insert and if we see concurrent cleanup
* just exit in hope that concurrent process will clean up pending
* list.
*/
if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
return;
@ -829,9 +828,10 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
Assert(!GinPageIsDeleted(page));
/*
* Are we walk through the page which as we remember was a tail when we
* start our cleanup? But if caller asks us to clean up whole pending
* list then ignore old tail, we will work until list becomes empty.
* Are we walk through the page which as we remember was a tail when
* we start our cleanup? But if caller asks us to clean up whole
* pending list then ignore old tail, we will work until list becomes
* empty.
*/
if (blkno == blknoFinish && full_clean == false)
cleanupFinish = true;
@ -917,8 +917,8 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
* locking */
/*
* remove read pages from pending list, at this point all
* content of read pages is in regular structure
* remove read pages from pending list, at this point all content
* of read pages is in regular structure
*/
shiftList(index, metabuffer, blkno, fill_fsm, stats);
@ -961,9 +961,9 @@ ginInsertCleanup(GinState *ginstate, bool full_clean,
ReleaseBuffer(metabuffer);
/*
* As pending list pages can have a high churn rate, it is
* desirable to recycle them immediately to the FreeSpace Map when
* ordinary backends clean the list.
* As pending list pages can have a high churn rate, it is desirable to
* recycle them immediately to the FreeSpace Map when ordinary backends
* clean the list.
*/
if (fsm_vac && fill_fsm)
IndexFreeSpaceMapVacuum(index);
@ -989,7 +989,7 @@ gin_clean_pending_list(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("recovery is in progress"),
errhint("GIN pending list cannot be cleaned up during recovery.")));
errhint("GIN pending list cannot be cleaned up during recovery.")));
/* Must be a GIN index */
if (indexRel->rd_rel->relkind != RELKIND_INDEX ||


@ -281,7 +281,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
&htup->t_self);
/* If we've maxed out our available memory, dump everything to the index */
if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L)
if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
{
ItemPointerData *list;
Datum key;


@ -540,8 +540,10 @@ ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
{
/* Yes, so initialize stats to zeroes */
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
/*
* and cleanup any pending inserts */
* and cleanup any pending inserts
*/
ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
false, stats);
}


@ -1498,8 +1498,9 @@ static void
gistvacuumpage(Relation rel, Page page, Buffer buffer)
{
OffsetNumber deletable[MaxIndexTuplesPerPage];
int ndeletable = 0;
OffsetNumber offnum, maxoff;
int ndeletable = 0;
OffsetNumber offnum,
maxoff;
Assert(GistPageIsLeaf(page));


@ -36,13 +36,13 @@
static void
gistkillitems(IndexScanDesc scan)
{
GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
Buffer buffer;
Page page;
OffsetNumber offnum;
ItemId iid;
int i;
bool killedsomething = false;
GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
Buffer buffer;
Page page;
OffsetNumber offnum;
ItemId iid;
int i;
bool killedsomething = false;
Assert(so->curBlkno != InvalidBlockNumber);
Assert(!XLogRecPtrIsInvalid(so->curPageLSN));
@ -57,21 +57,22 @@ gistkillitems(IndexScanDesc scan)
page = BufferGetPage(buffer);
/*
* If page LSN differs it means that the page was modified since the last read.
* killedItems could be not valid so LP_DEAD hints applying is not safe.
* If page LSN differs it means that the page was modified since the last
* read. killedItems could be not valid so LP_DEAD hints applying is not
* safe.
*/
if(PageGetLSN(page) != so->curPageLSN)
if (PageGetLSN(page) != so->curPageLSN)
{
UnlockReleaseBuffer(buffer);
so->numKilled = 0; /* reset counter */
so->numKilled = 0; /* reset counter */
return;
}
Assert(GistPageIsLeaf(page));
/*
* Mark all killedItems as dead. We need no additional recheck,
* because, if page was modified, pageLSN must have changed.
* Mark all killedItems as dead. We need no additional recheck, because,
* if page was modified, pageLSN must have changed.
*/
for (i = 0; i < so->numKilled; i++)
{
@ -390,7 +391,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
ItemId iid = PageGetItemId(page, i);
ItemId iid = PageGetItemId(page, i);
IndexTuple it;
bool match;
bool recheck;
@ -400,10 +401,11 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
* If the scan specifies not to return killed tuples, then we treat a
* killed tuple as not passing the qual.
*/
if(scan->ignore_killed_tuples && ItemIdIsDead(iid))
if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
continue;
it = (IndexTuple) PageGetItem(page, iid);
/*
* Must call gistindex_keytest in tempCxt, and clean up any leftover
* junk afterward.
@ -665,11 +667,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
MemoryContextSwitchTo(so->giststate->scanCxt);
MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
* sizeof(OffsetNumber));
* sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
@ -702,11 +704,11 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
MemoryContextSwitchTo(so->giststate->scanCxt);
MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
* sizeof(OffsetNumber));
* sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}


@ -230,8 +230,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
ScanKey skey = scan->keyData + i;
/*
* Copy consistent support function to ScanKey structure
* instead of function implementing filtering operator.
* Copy consistent support function to ScanKey structure instead
* of function implementing filtering operator.
*/
fmgr_info_copy(&(skey->sk_func),
&(so->giststate->consistentFn[skey->sk_attno - 1]),
@ -303,8 +303,8 @@ gistrescan(IndexScanDesc scan, ScanKey key, int nkeys,
so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
/*
* Copy distance support function to ScanKey structure
* instead of function implementing ordering operator.
* Copy distance support function to ScanKey structure instead of
* function implementing ordering operator.
*/
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);


@ -1687,7 +1687,7 @@ heap_parallelscan_nextpage(HeapScanDesc scan)
{
BlockNumber page = InvalidBlockNumber;
BlockNumber sync_startpage = InvalidBlockNumber;
BlockNumber report_page = InvalidBlockNumber;
BlockNumber report_page = InvalidBlockNumber;
ParallelHeapScanDesc parallel_scan;
Assert(scan->rs_parallel);


@ -178,7 +178,7 @@ static void
RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
{
Page page;
BlockNumber blockNum = InvalidBlockNumber,
BlockNumber blockNum = InvalidBlockNumber,
firstBlock = InvalidBlockNumber;
int extraBlocks = 0;
int lockWaiters = 0;
@ -191,10 +191,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
return;
/*
* It might seem like multiplying the number of lock waiters by as much
* as 20 is too aggressive, but benchmarking revealed that smaller numbers
* were insufficient. 512 is just an arbitrary cap to prevent pathological
* results.
* It might seem like multiplying the number of lock waiters by as much as
* 20 is too aggressive, but benchmarking revealed that smaller numbers
* were insufficient. 512 is just an arbitrary cap to prevent
* pathological results.
*/
extraBlocks = Min(512, lockWaiters * 20);
@ -225,10 +225,10 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
}
/*
* Updating the upper levels of the free space map is too expensive
* to do for every block, but it's worth doing once at the end to make
* sure that subsequent insertion activity sees all of those nifty free
* pages we just inserted.
* Updating the upper levels of the free space map is too expensive to do
* for every block, but it's worth doing once at the end to make sure that
* subsequent insertion activity sees all of those nifty free pages we
* just inserted.
*
* Note that we're using the freespace value that was reported for the
* last block we added as if it were the freespace value for every block
@ -547,8 +547,8 @@ loop:
}
/*
* In addition to whatever extension we performed above, we always add
* at least one block to satisfy our own request.
* In addition to whatever extension we performed above, we always add at
* least one block to satisfy our own request.
*
* XXX This does an lseek - rather expensive - but at the moment it is the
* only way to accurately determine how many blocks are in a relation. Is


@ -105,8 +105,8 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
OldestXmin = RecentGlobalXmin;
else
OldestXmin =
TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
relation);
TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
relation);
Assert(TransactionIdIsValid(OldestXmin));


@ -272,7 +272,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
Page page;
uint8 *map;
uint8 *map;
#ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
@ -291,7 +291,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
page = BufferGetPage(vmBuf);
map = (uint8 *)PageGetContents(page);
map = (uint8 *) PageGetContents(page);
LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))


@ -395,7 +395,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* Check for a conflict-in as we would if we were going to
* write to this page. We aren't actually going to write,
* but we want a chance to report SSI conflicts that would
* otherwise be masked by this unique constraint violation.
* otherwise be masked by this unique constraint
* violation.
*/
CheckForSerializableConflictIn(rel, NULL, buf);


@ -813,8 +813,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/*
* Check to see if we need to issue one final WAL record for this index,
* which may be needed for correctness on a hot standby node when
* non-MVCC index scans could take place.
* which may be needed for correctness on a hot standby node when non-MVCC
* index scans could take place.
*
* If the WAL is replayed in hot standby, the replay process needs to get
* cleanup locks on all index leaf pages, just as we've been doing here.
@ -1025,13 +1025,13 @@ restart:
if (ndeletable > 0)
{
/*
* Notice that the issued XLOG_BTREE_VACUUM WAL record includes all
* information to the replay code to allow it to get a cleanup lock
* on all pages between the previous lastBlockVacuumed and this page.
* This ensures that WAL replay locks all leaf pages at some point,
* which is important should non-MVCC scans be requested.
* This is currently unused on standby, but we record it anyway, so
* that the WAL contains the required information.
* Notice that the issued XLOG_BTREE_VACUUM WAL record includes
* all information to the replay code to allow it to get a cleanup
* lock on all pages between the previous lastBlockVacuumed and
* this page. This ensures that WAL replay locks all leaf pages at
* some point, which is important should non-MVCC scans be
* requested. This is currently unused on standby, but we record
* it anyway, so that the WAL contains the required information.
*
* Since we can visit leaf pages out-of-order when recursing,
* replay might end up locking such pages an extra time, but it


@ -392,15 +392,15 @@ btree_xlog_vacuum(XLogReaderState *record)
xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
* This section of code is thought to be no longer needed, after
* analysis of the calling paths. It is retained to allow the code
* to be reinstated if a flaw is revealed in that thinking.
* This section of code is thought to be no longer needed, after analysis
* of the calling paths. It is retained to allow the code to be reinstated
* if a flaw is revealed in that thinking.
*
* If we are running non-MVCC scans using this index we need to do some
* additional work to ensure correctness, which is known as a "pin scan"
* described in more detail in next paragraphs. We used to do the extra
* work in all cases, whereas we now avoid that work in most cases.
* If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
* work in all cases, whereas we now avoid that work in most cases. If
* lastBlockVacuumed is set to InvalidBlockNumber then we skip the
* additional work required for the pin scan.
*
* Avoiding this extra work is important since it requires us to touch


@ -29,8 +29,8 @@ generic_desc(StringInfo buf, XLogReaderState *record)
while (ptr < end)
{
OffsetNumber offset,
length;
OffsetNumber offset,
length;
memcpy(&offset, ptr, sizeof(offset));
ptr += sizeof(offset);


@ -26,7 +26,7 @@ logicalmsg_desc(StringInfo buf, XLogReaderState *record)
xl_logical_message *xlrec = (xl_logical_message *) rec;
appendStringInfo(buf, "%s message size %zu bytes",
xlrec->transactional ? "transactional" : "nontransactional",
xlrec->transactional ? "transactional" : "nontransactional",
xlrec->message_size);
}
}


@ -100,7 +100,7 @@ standby_desc_invalidations(StringInfo buf,
Oid dbId, Oid tsId,
bool relcacheInitFileInval)
{
int i;
int i;
if (relcacheInitFileInval)
appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",


@ -205,8 +205,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
if (parsed.nmsgs > 0)
{
standby_desc_invalidations(
buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
XactCompletionRelcacheInitFileInval(parsed.xinfo));
buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
XactCompletionRelcacheInitFileInval(parsed.xinfo));
}
if (XactCompletionForceSyncCommit(parsed.xinfo))


@ -26,8 +26,8 @@
const struct config_enum_entry wal_level_options[] = {
{"minimal", WAL_LEVEL_MINIMAL, false},
{"replica", WAL_LEVEL_REPLICA, false},
{"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"logical", WAL_LEVEL_LOGICAL, false},
{NULL, 0, false}
};


@ -92,7 +92,7 @@ typedef struct CommitTimestampShared
{
TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
bool commitTsActive;
bool commitTsActive;
} CommitTimestampShared;
CommitTimestampShared *commitTsShared;
@ -153,9 +153,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
* No-op if the module is not active.
*
* An unlocked read here is fine, because in a standby (the only place
* where the flag can change in flight) this routine is only called by
* the recovery process, which is also the only process which can change
* the flag.
* where the flag can change in flight) this routine is only called by the
* recovery process, which is also the only process which can change the
* flag.
*/
if (!commitTsShared->commitTsActive)
return;
@ -767,8 +767,8 @@ ExtendCommitTs(TransactionId newestXact)
int pageno;
/*
* Nothing to do if module not enabled. Note we do an unlocked read of the
* flag here, which is okay because this routine is only called from
* Nothing to do if module not enabled. Note we do an unlocked read of
* the flag here, which is okay because this routine is only called from
* GetNewTransactionId, which is never called in a standby.
*/
Assert(!InRecovery);
@ -855,7 +855,7 @@ AdvanceOldestCommitTsXid(TransactionId oldestXact)
{
LWLockAcquire(CommitTsLock, LW_EXCLUSIVE);
if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId &&
TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
ShmemVariableCache->oldestCommitTsXid = oldestXact;
LWLockRelease(CommitTsLock);
}


@ -52,9 +52,8 @@ typedef struct
Buffer buffer; /* registered buffer */
int flags; /* flags for this buffer */
int deltaLen; /* space consumed in delta field */
char *image; /* copy of page image for modification,
* do not do it in-place to have aligned
* memory chunk */
char *image; /* copy of page image for modification, do not
* do it in-place to have aligned memory chunk */
char delta[MAX_DELTA_SIZE]; /* delta between page images */
} PageData;


@ -988,8 +988,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
char *oldest_datname = get_database_name(oldest_datoid);
/*
* Immediately kick autovacuum into action as we're already
* in ERROR territory.
* Immediately kick autovacuum into action as we're already in
* ERROR territory.
*/
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
@ -1134,8 +1134,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used",
"database with OID %u must be vacuumed before %d more multixact members are used",
MultiXactState->offsetStopLimit - nextOffset + nmembers,
MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers,
MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers),
errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));


@ -134,9 +134,9 @@ CreateParallelContext(parallel_worker_main_type entrypoint, int nworkers)
nworkers = 0;
/*
* If we are running under serializable isolation, we can't use
* parallel workers, at least not until somebody enhances that mechanism
* to be parallel-aware.
* If we are running under serializable isolation, we can't use parallel
* workers, at least not until somebody enhances that mechanism to be
* parallel-aware.
*/
if (IsolationIsSerializable())
nworkers = 0;
@ -646,9 +646,9 @@ DestroyParallelContext(ParallelContext *pcxt)
}
/*
* We can't finish transaction commit or abort until all of the
* workers have exited. This means, in particular, that we can't respond
* to interrupts at this stage.
* We can't finish transaction commit or abort until all of the workers
* have exited. This means, in particular, that we can't respond to
* interrupts at this stage.
*/
HOLD_INTERRUPTS();
WaitForParallelWorkersToExit(pcxt);
@ -918,7 +918,7 @@ ParallelWorkerMain(Datum main_arg)
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid magic number in dynamic shared memory segment")));
errmsg("invalid magic number in dynamic shared memory segment")));
/* Look up fixed parallel state. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
@ -958,9 +958,9 @@ ParallelWorkerMain(Datum main_arg)
*/
/*
* Join locking group. We must do this before anything that could try
* to acquire a heavyweight lock, because any heavyweight locks acquired
* to this point could block either directly against the parallel group
* Join locking group. We must do this before anything that could try to
* acquire a heavyweight lock, because any heavyweight locks acquired to
* this point could block either directly against the parallel group
* leader or against some process which in turn waits for a lock that
* conflicts with the parallel group leader, causing an undetected
* deadlock. (If we can't join the lock group, the leader has gone away,


@ -152,7 +152,7 @@ SimpleLruShmemSize(int nslots, int nlsns)
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
@ -224,7 +224,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
for (slotno = 0; slotno < nslots; slotno++)
{
LWLockInitialize(&shared->buffer_locks[slotno].lock,
shared->lwlock_tranche_id);
shared->lwlock_tranche_id);
shared->page_buffer[slotno] = ptr;
shared->page_status[slotno] = SLRU_PAGE_EMPTY;


@ -257,7 +257,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID)
startPage++;
/* must account for wraparound */
if (startPage > TransactionIdToPage(MaxTransactionId))
startPage=0;
startPage = 0;
}
(void) ZeroSUBTRANSPage(startPage);


@ -140,13 +140,13 @@ typedef struct GlobalTransactionData
TimestampTz prepared_at; /* time of preparation */
/*
* Note that we need to keep track of two LSNs for each GXACT.
* We keep track of the start LSN because this is the address we must
* use to read state data back from WAL when committing a prepared GXACT.
* We keep track of the end LSN because that is the LSN we need to wait
* for prior to commit.
* Note that we need to keep track of two LSNs for each GXACT. We keep
* track of the start LSN because this is the address we must use to read
* state data back from WAL when committing a prepared GXACT. We keep
* track of the end LSN because that is the LSN we need to wait for prior
* to commit.
*/
XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
Oid owner; /* ID of user that executed the xact */
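A minimal sketch of how the two LSNs tracked per GXACT might be consumed at commit time (illustrative only; it assumes the XlogReadTwoPhaseData() helper shown further below and the standard XLogFlush() API, and is not part of this patch):

/* Sketch: use of prepare_start_lsn / prepare_end_lsn when committing. */
static void
commit_prepared_sketch(GlobalTransaction gxact)
{
	char	   *buf;
	int			len;

	/* The end LSN is what must be durable before we may commit. */
	XLogFlush(gxact->prepare_end_lsn);

	/* The start LSN is where the prepare record is re-read from WAL. */
	XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len);

	/* ... interpret the TwoPhaseFileHeader in buf and complete the commit ... */
	pfree(buf);
}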
@ -980,7 +980,7 @@ StartPrepare(GlobalTransaction gxact)
hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels);
hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs,
&hdr.initfileinval);
hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
save_state_data(gxact->gid, hdr.gidlen);
@ -1259,28 +1259,28 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
errdetail("Failed while allocating an XLog reading processor.")));
errdetail("Failed while allocating an XLog reading processor.")));
record = XLogReadRecord(xlogreader, lsn, &errormsg);
if (record == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read two-phase state from xlog at %X/%X",
(uint32) (lsn >> 32),
(uint32) lsn)));
(uint32) (lsn >> 32),
(uint32) lsn)));
if (XLogRecGetRmid(xlogreader) != RM_XACT_ID ||
(XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("expected two-phase state data is not present in xlog at %X/%X",
(uint32) (lsn >> 32),
(uint32) lsn)));
(uint32) (lsn >> 32),
(uint32) lsn)));
if (len != NULL)
*len = XLogRecGetDataLen(xlogreader);
*buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader));
*buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));
XLogReaderFree(xlogreader);
@ -1347,10 +1347,9 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
xid = pgxact->xid;
/*
* Read and validate 2PC state data.
* State data will typically be stored in WAL files if the LSN is after the
* last checkpoint record, or moved to disk if for some reason they have
* lived for a long time.
* Read and validate 2PC state data. State data will typically be stored
* in WAL files if the LSN is after the last checkpoint record, or moved
* to disk if for some reason they have lived for a long time.
*/
if (gxact->ondisk)
buf = ReadTwoPhaseFile(xid, true);
@ -1605,22 +1604,20 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();
/*
* We are expecting there to be zero GXACTs that need to be
* copied to disk, so we perform all I/O while holding
* TwoPhaseStateLock for simplicity. This prevents any new xacts
* from preparing while this occurs, which shouldn't be a problem
* since the presence of long-lived prepared xacts indicates the
* transaction manager isn't active.
* We are expecting there to be zero GXACTs that need to be copied to
* disk, so we perform all I/O while holding TwoPhaseStateLock for
* simplicity. This prevents any new xacts from preparing while this
* occurs, which shouldn't be a problem since the presence of long-lived
* prepared xacts indicates the transaction manager isn't active.
*
* It's also possible to move I/O out of the lock, but on
* every error we should check whether somebody committed our
* transaction in different backend. Let's leave this optimisation
* for future, if somebody will spot that this place cause
* bottleneck.
* It's also possible to move I/O out of the lock, but on every error we
* should check whether somebody committed our transaction in a different
* backend. Let's leave this optimisation for the future, if somebody
* spots that this place causes a bottleneck.
*
* Note that it isn't possible for there to be a GXACT with
* a prepare_end_lsn set prior to the last checkpoint yet
* is marked invalid, because of the efforts with delayChkpt.
* Note that it isn't possible for there to be a GXACT with a
* prepare_end_lsn set prior to the last checkpoint yet is marked invalid,
* because of the efforts with delayChkpt.
*/
LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
@ -1633,7 +1630,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
gxact->prepare_end_lsn <= redo_horizon)
{
char *buf;
int len;
int len;
XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len);
RecreateTwoPhaseFile(pgxact->xid, buf, len);
@ -1920,7 +1917,7 @@ RecoverPreparedTransactions(void)
TwoPhaseFileHeader *hdr;
TransactionId *subxids;
GlobalTransaction gxact;
const char *gid;
const char *gid;
int i;
xid = (TransactionId) strtoul(clde->d_name, NULL, 16);

View File

@ -1166,19 +1166,19 @@ RecordTransactionCommit(void)
/*
* Transactions without an assigned xid can contain invalidation
* messages (e.g. explicit relcache invalidations or catcache
* invalidations for inplace updates); standbys need to process
* those. We can't emit a commit record without an xid, and we don't
* want to force assigning an xid, because that'd be problematic for
* e.g. vacuum. Hence we emit a bespoke record for the
* invalidations. We don't want to use that in case a commit record is
* emitted, so they happen synchronously with commits (besides not
* wanting to emit more WAL recoreds).
* invalidations for inplace updates); standbys need to process those.
* We can't emit a commit record without an xid, and we don't want to
* force assigning an xid, because that'd be problematic for e.g.
* vacuum. Hence we emit a bespoke record for the invalidations. We
* don't want to use that in case a commit record is emitted, so they
* happen synchronously with commits (besides not wanting to emit more
* WAL records).
*/
if (nmsgs != 0)
{
LogStandbyInvalidations(nmsgs, invalMessages,
RelcacheInitFileInval);
wrote_xlog = true; /* not strictly necessary */
wrote_xlog = true; /* not strictly necessary */
}
/*
@ -1272,8 +1272,8 @@ RecordTransactionCommit(void)
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
* KnownAssignedXids machinery requires tracking every XID assignment. It
* might be OK to skip it only when wal_level < replica, but for now
* we don't.)
* might be OK to skip it only when wal_level < replica, but for now we
* don't.)
*
* However, if we're doing cleanup of any non-temp rels or committing any
* command that wanted to force sync commit, then we must flush XLOG
@ -5486,8 +5486,8 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
/*
* If asked by the primary (because someone is waiting for a synchronous
* commit = remote_apply), we will need to ask walreceiver to send a
* reply immediately.
* commit = remote_apply), we will need to ask walreceiver to send a reply
* immediately.
*/
if (XactCompletionApplyFeedback(parsed->xinfo))
XLogRequestWalReceiverReply();

View File

@ -5004,9 +5004,9 @@ readRecoveryCommandFile(void)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target_action",
item->value),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target_action",
item->value),
errhint("Valid values are \"pause\", \"promote\", and \"shutdown\".")));
ereport(DEBUG2,
@ -5087,9 +5087,9 @@ readRecoveryCommandFile(void)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target",
item->value),
errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
"recovery_target",
item->value),
errhint("The only allowed value is \"immediate\".")));
ereport(DEBUG2,
(errmsg_internal("recovery_target = '%s'",
@ -5880,8 +5880,8 @@ CheckRequiredParameterValues(void)
}
/*
* For Hot Standby, the WAL must be generated with 'replica' mode, and
* we must have at least as many backend slots as the primary.
* For Hot Standby, the WAL must be generated with 'replica' mode, and we
* must have at least as many backend slots as the primary.
*/
if (ArchiveRecoveryRequested && EnableHotStandby)
{
@ -6163,26 +6163,26 @@ StartupXLOG(void)
* is no use of such file. There is no harm in retaining it, but it
* is better to get rid of the map file so that we don't have any
* redundant file in data directory and it will avoid any sort of
* confusion. It seems prudent though to just rename the file out
* of the way rather than delete it completely, also we ignore any
* error that occurs in rename operation as even if map file is
* present without backup_label file, it is harmless.
* confusion. It seems prudent though to just rename the file out of
* the way rather than delete it completely; we also ignore any error
* that occurs in the rename operation, since even if the map file is
* present without a backup_label file, it is harmless.
*/
if (stat(TABLESPACE_MAP, &st) == 0)
{
unlink(TABLESPACE_MAP_OLD);
if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0)
ereport(LOG,
(errmsg("ignoring file \"%s\" because no file \"%s\" exists",
TABLESPACE_MAP, BACKUP_LABEL_FILE),
errdetail("File \"%s\" was renamed to \"%s\".",
TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
(errmsg("ignoring file \"%s\" because no file \"%s\" exists",
TABLESPACE_MAP, BACKUP_LABEL_FILE),
errdetail("File \"%s\" was renamed to \"%s\".",
TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
else
ereport(LOG,
(errmsg("ignoring file \"%s\" because no file \"%s\" exists",
TABLESPACE_MAP, BACKUP_LABEL_FILE),
errdetail("Could not rename file \"%s\" to \"%s\": %m.",
TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
(errmsg("ignoring file \"%s\" because no file \"%s\" exists",
TABLESPACE_MAP, BACKUP_LABEL_FILE),
errdetail("Could not rename file \"%s\" to \"%s\": %m.",
TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
}
/*
@ -6314,24 +6314,24 @@ StartupXLOG(void)
ereport(DEBUG1,
(errmsg_internal("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE")));
wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg_internal("next transaction ID: %u:%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid,
checkPoint.nextOid)));
checkPoint.nextXidEpoch, checkPoint.nextXid,
checkPoint.nextOid)));
ereport(DEBUG1,
(errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u",
checkPoint.nextMulti, checkPoint.nextMultiOffset)));
checkPoint.nextMulti, checkPoint.nextMultiOffset)));
ereport(DEBUG1,
(errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
checkPoint.oldestXid, checkPoint.oldestXidDB)));
(errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
checkPoint.oldestXid, checkPoint.oldestXidDB)));
ereport(DEBUG1,
(errmsg_internal("oldest MultiXactId: %u, in database %u",
checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
ereport(DEBUG1,
(errmsg_internal("commit timestamp Xid oldest/newest: %u/%u",
checkPoint.oldestCommitTsXid,
checkPoint.newestCommitTsXid)));
checkPoint.oldestCommitTsXid,
checkPoint.newestCommitTsXid)));
if (!TransactionIdIsNormal(checkPoint.nextXid))
ereport(PANIC,
(errmsg("invalid next transaction ID")));
@ -6883,8 +6883,8 @@ StartupXLOG(void)
SpinLockRelease(&XLogCtl->info_lck);
/*
* If rm_redo called XLogRequestWalReceiverReply, then we
* wake up the receiver so that it notices the updated
* If rm_redo called XLogRequestWalReceiverReply, then we wake
* up the receiver so that it notices the updated
* lastReplayedEndRecPtr and sends a reply to the master.
*/
if (doRequestWalReceiverReply)

View File

@ -104,8 +104,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
MemoryContext oldcontext;
/*
* Label file and tablespace map file need to be long-lived, since they
* are read in pg_stop_backup.
* Label file and tablespace map file need to be long-lived, since
* they are read in pg_stop_backup.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
label_file = makeStringInfo();
@ -113,7 +113,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
MemoryContextSwitchTo(oldcontext);
startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file,
dir, NULL, tblspc_map_file, false, true);
dir, NULL, tblspc_map_file, false, true);
nonexclusive_backup_running = true;
before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0);
@ -138,8 +138,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
* Note: different from CancelBackup which just cancels online backup mode.
*
* Note: this version is only called to stop an exclusive backup. The function
* pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
* stop non-exclusive backups.
* pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
* stop non-exclusive backups.
*
* Permission checking for this function is managed through the normal
* GRANT system.
@ -156,10 +156,10 @@ pg_stop_backup(PG_FUNCTION_ARGS)
errhint("Did you mean to use pg_stop_backup('f')?")));
/*
* Exclusive backups were typically started in a different connection,
* so don't try to verify that exclusive_backup_running is set in this one.
* Actual verification that an exclusive backup is in fact running is handled
* inside do_pg_stop_backup.
* Exclusive backups were typically started in a different connection, so
* don't try to verify that exclusive_backup_running is set in this one.
* Actual verification that an exclusive backup is in fact running is
* handled inside do_pg_stop_backup.
*/
stoppoint = do_pg_stop_backup(NULL, true, NULL);
@ -182,16 +182,16 @@ pg_stop_backup(PG_FUNCTION_ARGS)
Datum
pg_stop_backup_v2(PG_FUNCTION_ARGS)
{
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
TupleDesc tupdesc;
Tuplestorestate *tupstore;
MemoryContext per_query_ctx;
MemoryContext oldcontext;
Datum values[3];
bool nulls[3];
MemoryContext per_query_ctx;
MemoryContext oldcontext;
Datum values[3];
bool nulls[3];
bool exclusive = PG_GETARG_BOOL(0);
XLogRecPtr stoppoint;
bool exclusive = PG_GETARG_BOOL(0);
XLogRecPtr stoppoint;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@ -248,9 +248,8 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
errhint("Did you mean to use pg_stop_backup('t')?")));
/*
* Stop the non-exclusive backup. Return a copy of the backup
* label and tablespace map so they can be written to disk by
* the caller.
* Stop the non-exclusive backup. Return a copy of the backup label
* and tablespace map so they can be written to disk by the caller.
*/
stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
nonexclusive_backup_running = false;
@ -269,7 +268,7 @@ pg_stop_backup_v2(PG_FUNCTION_ARGS)
}
/* Stoppoint is included on both exclusive and nonexclusive backups */
values[0] = LSNGetDatum(stoppoint);
values[0] = LSNGetDatum(stoppoint);
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
tuplestore_donestoring(tupstore);
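The hunks above show only fragments of pg_stop_backup_v2(); for context, here is a hedged sketch of the standard materialize-into-tuplestore pattern that such a set-returning C function follows (generic placeholder values, not the actual backup bookkeeping):

/* Sketch of the materialize-into-tuplestore set-returning-function pattern. */
Datum
srf_materialize_sketch(PG_FUNCTION_ARGS)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	TupleDesc	tupdesc;
	Tuplestorestate *tupstore;
	MemoryContext per_query_ctx;
	MemoryContext oldcontext;
	Datum		values[3];
	bool		nulls[3];

	/* Caller must support returning a tuplestore. */
	if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));

	/* Build the result tuple descriptor from the function's declaration. */
	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		elog(ERROR, "return type must be a row type");

	/* The tuplestore must live in the per-query context, not per-call. */
	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = MemoryContextSwitchTo(per_query_ctx);
	tupstore = tuplestore_begin_heap(true, false, work_mem);
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;
	MemoryContextSwitchTo(oldcontext);

	/* Fill in one result row with placeholder values. */
	memset(nulls, 0, sizeof(nulls));
	values[0] = Int32GetDatum(0);
	values[1] = Int32GetDatum(0);
	values[2] = Int32GetDatum(0);
	tuplestore_putvalues(tupstore, tupdesc, values, nulls);

	return (Datum) 0;
}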

View File

@ -322,7 +322,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
if (total_len < SizeOfXLogRecord)
{
report_invalid_record(state,
"invalid record length at %X/%X: wanted %u, got %u",
"invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, total_len);
goto err;
@ -621,7 +621,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
if (record->xl_tot_len < SizeOfXLogRecord)
{
report_invalid_record(state,
"invalid record length at %X/%X: wanted %u, got %u",
"invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, record->xl_tot_len);
return false;

View File

@ -1792,7 +1792,7 @@ get_object_address_defacl(List *objname, List *objargs, bool missing_ok)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unrecognized default ACL object type %c", objtype),
errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\".")));
errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\".")));
}
/*

View File

@ -82,9 +82,9 @@ AggregateCreate(const char *aggName,
Form_pg_proc proc;
Oid transfn;
Oid finalfn = InvalidOid; /* can be omitted */
Oid combinefn = InvalidOid; /* can be omitted */
Oid combinefn = InvalidOid; /* can be omitted */
Oid serialfn = InvalidOid; /* can be omitted */
Oid deserialfn = InvalidOid; /* can be omitted */
Oid deserialfn = InvalidOid; /* can be omitted */
Oid mtransfn = InvalidOid; /* can be omitted */
Oid minvtransfn = InvalidOid; /* can be omitted */
Oid mfinalfn = InvalidOid; /* can be omitted */
@ -407,11 +407,11 @@ AggregateCreate(const char *aggName,
/* handle the combinefn, if supplied */
if (aggcombinefnName)
{
Oid combineType;
Oid combineType;
/*
* Combine function must have 2 argument, each of which is the
* trans type
* Combine function must have 2 arguments, each of which is the trans
* type
*/
fnArgs[0] = aggTransType;
fnArgs[1] = aggTransType;
@ -423,9 +423,9 @@ AggregateCreate(const char *aggName,
if (combineType != aggTransType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("return type of combine function %s is not %s",
NameListToString(aggcombinefnName),
format_type_be(aggTransType))));
errmsg("return type of combine function %s is not %s",
NameListToString(aggcombinefnName),
format_type_be(aggTransType))));
/*
* A combine function to combine INTERNAL states must accept nulls and
@ -440,8 +440,9 @@ AggregateCreate(const char *aggName,
}
/*
* Validate the serialization function, if present. We must ensure that the
* return type of this function is the same as the specified serialType.
* Validate the serialization function, if present. We must ensure that
* the return type of this function is the same as the specified
* serialType.
*/
if (aggserialfnName)
{
@ -454,9 +455,9 @@ AggregateCreate(const char *aggName,
if (rettype != aggSerialType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("return type of serialization function %s is not %s",
NameListToString(aggserialfnName),
format_type_be(aggSerialType))));
errmsg("return type of serialization function %s is not %s",
NameListToString(aggserialfnName),
format_type_be(aggSerialType))));
}
/*
@ -474,9 +475,9 @@ AggregateCreate(const char *aggName,
if (rettype != aggTransType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("return type of deserialization function %s is not %s",
NameListToString(aggdeserialfnName),
format_type_be(aggTransType))));
errmsg("return type of deserialization function %s is not %s",
NameListToString(aggdeserialfnName),
format_type_be(aggTransType))));
}
/*

View File

@ -338,14 +338,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
/*
* There's little point in having a serialization/deserialization
* function on aggregates that don't have an internal state, so let's
* just disallow this as it may help clear up any confusion or needless
* authoring of these functions.
* just disallow this as it may help clear up any confusion or
* needless authoring of these functions.
*/
if (transTypeId != INTERNALOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("a serialization type must only be specified when the aggregate transition data type is %s",
format_type_be(INTERNALOID))));
format_type_be(INTERNALOID))));
serialTypeId = typenameTypeId(NULL, serialType);
@ -358,15 +358,15 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
/*
* We disallow INTERNAL serialType as the whole point of the
* serialized types is to allow the aggregate state to be output,
* and we cannot output INTERNAL. This check, combined with the one
* above ensures that the trans type and serialization type are not the
* serialized types is to allow the aggregate state to be output, and
* we cannot output INTERNAL. This check, combined with the one above
* ensures that the trans type and serialization type are not the
* same.
*/
if (serialTypeId == INTERNALOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("aggregate serialization data type cannot be %s",
errmsg("aggregate serialization data type cannot be %s",
format_type_be(serialTypeId))));
/*
@ -392,14 +392,14 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
*/
if (serialfuncName != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("must specify serialization type when specifying serialization function")));
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("must specify serialization type when specifying serialization function")));
/* likewise for the deserialization function */
if (deserialfuncName != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("must specify serialization type when specifying deserialization function")));
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("must specify serialization type when specifying deserialization function")));
}
/*
@ -493,7 +493,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters,
mfinalfuncExtraArgs,
sortoperatorName, /* sort operator name */
transTypeId, /* transition data type */
serialTypeId, /* serialization data type */
serialTypeId, /* serialization data type */
transSpace, /* transition space */
mtransTypeId, /* transition data type */
mtransSpace, /* transition space */

View File

@ -400,18 +400,17 @@ ExecRenameStmt(RenameStmt *stmt)
ObjectAddress
ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddress)
{
ObjectAddress address;
ObjectAddress refAddr;
Relation rel;
ObjectAddress address;
ObjectAddress refAddr;
Relation rel;
address =
get_object_address_rv(stmt->objectType, stmt->relation, stmt->objname,
stmt->objargs, &rel, AccessExclusiveLock, false);
stmt->objargs, &rel, AccessExclusiveLock, false);
/*
* If a relation was involved, it would have been opened and locked.
* We don't need the relation here, but we'll retain the lock until
* commit.
* If a relation was involved, it would have been opened and locked. We
* don't need the relation here, but we'll retain the lock until commit.
*/
if (rel)
heap_close(rel, NoLock);
@ -630,8 +629,8 @@ AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid)
oldNspOid = DatumGetObjectId(namespace);
/*
* If the object is already in the correct namespace, we don't need
* to do anything except fire the object access hook.
* If the object is already in the correct namespace, we don't need to do
* anything except fire the object access hook.
*/
if (oldNspOid == nspOid)
{

View File

@ -138,7 +138,7 @@ RemoveAccessMethodById(Oid amOid)
/*
* get_am_type_oid
* Worker for various get_am_*_oid variants
* Worker for various get_am_*_oid variants
*
* If missing_ok is false, throw an error if access method not found. If
* true, just return InvalidOid.
@ -188,7 +188,7 @@ get_index_am_oid(const char *amname, bool missing_ok)
/*
* get_am_oid - given an access method name, look up its OID.
* The type is not checked.
* The type is not checked.
*/
Oid
get_am_oid(const char *amname, bool missing_ok)

View File

@ -570,7 +570,7 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params,
*/
if (!inh)
{
BlockNumber relallvisible;
BlockNumber relallvisible;
visibilitymap_count(onerel, &relallvisible, NULL);

View File

@ -85,8 +85,8 @@ CreateConversionCommand(CreateConversionStmt *stmt)
if (get_func_rettype(funcoid) != VOIDOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("encoding conversion function %s must return type %s",
NameListToString(func_name), "void")));
errmsg("encoding conversion function %s must return type %s",
NameListToString(func_name), "void")));
/* Check we have EXECUTE rights for the function */
aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);

View File

@ -875,7 +875,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
if (is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY FROM not supported with row-level security"),
errmsg("COPY FROM not supported with row-level security"),
errhint("Use INSERT statements instead.")));
/* Build target list */
@ -1399,16 +1399,16 @@ BeginCopy(bool is_from,
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DO INSTEAD NOTHING rules are not supported for COPY")));
errmsg("DO INSTEAD NOTHING rules are not supported for COPY")));
}
else if (list_length(rewritten) > 1)
{
ListCell *lc;
ListCell *lc;
/* examine queries to determine which error message to issue */
foreach(lc, rewritten)
{
Query *q = (Query *) lfirst(lc);
Query *q = (Query *) lfirst(lc);
if (q->querySource == QSRC_QUAL_INSTEAD_RULE)
ereport(ERROR,
@ -1417,7 +1417,7 @@ BeginCopy(bool is_from,
if (q->querySource == QSRC_NON_INSTEAD_RULE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("DO ALSO rules are not supported for the COPY")));
errmsg("DO ALSO rules are not supported for the COPY")));
}
ereport(ERROR,
@ -1448,8 +1448,8 @@ BeginCopy(bool is_from,
query->commandType == CMD_DELETE);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY query must have a RETURNING clause")));
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("COPY query must have a RETURNING clause")));
}
/* plan the query */

View File

@ -1419,7 +1419,7 @@ CreateExtensionInternal(CreateExtensionStmt *stmt, List *parents)
CreateExtensionStmt *ces;
ListCell *lc;
ObjectAddress addr;
List *cascade_parents;
List *cascade_parents;
/* Check extension name validity before trying to cascade */
check_valid_extension_name(curreq);

View File

@ -487,7 +487,7 @@ lookup_fdw_handler_func(DefElem *handler)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s",
NameListToString((List *) handler->arg), "fdw_handler")));
NameListToString((List *) handler->arg), "fdw_handler")));
return handlerOid;
}

View File

@ -217,21 +217,20 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
RelationGetRelationName(matviewRel));
/*
* Check that there is a unique index with no WHERE clause on
* one or more columns of the materialized view if CONCURRENTLY
* is specified.
* Check that there is a unique index with no WHERE clause on one or more
* columns of the materialized view if CONCURRENTLY is specified.
*/
if (concurrent)
{
List *indexoidlist = RelationGetIndexList(matviewRel);
ListCell *indexoidscan;
List *indexoidlist = RelationGetIndexList(matviewRel);
ListCell *indexoidscan;
bool hasUniqueIndex = false;
foreach(indexoidscan, indexoidlist)
{
Oid indexoid = lfirst_oid(indexoidscan);
Relation indexRel;
Form_pg_index indexStruct;
Form_pg_index indexStruct;
indexRel = index_open(indexoid, AccessShareLock);
indexStruct = indexRel->rd_index;
@ -255,9 +254,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
if (!hasUniqueIndex)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot refresh materialized view \"%s\" concurrently",
quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
RelationGetRelationName(matviewRel))),
errmsg("cannot refresh materialized view \"%s\" concurrently",
quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
RelationGetRelationName(matviewRel))),
errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view.")));
}
@ -745,8 +744,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
/*
* There must be at least one unique index on the matview.
*
* ExecRefreshMatView() checks that after taking the exclusive lock on
* the matview. So at least one unique index is guaranteed to exist here
* ExecRefreshMatView() checks that after taking the exclusive lock on the
* matview. So at least one unique index is guaranteed to exist here
* because the lock is still being held.
*/
Assert(foundUniqueIndex);

View File

@ -275,8 +275,8 @@ ValidateRestrictionEstimator(List *restrictionName)
if (get_func_rettype(restrictionOid) != FLOAT8OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("restriction estimator function %s must return type %s",
NameListToString(restrictionName), "float8")));
errmsg("restriction estimator function %s must return type %s",
NameListToString(restrictionName), "float8")));
/* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE);
@ -321,8 +321,8 @@ ValidateJoinEstimator(List *joinName)
if (get_func_rettype(joinOid) != FLOAT8OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("join estimator function %s must return type %s",
NameListToString(joinName), "float8")));
errmsg("join estimator function %s must return type %s",
NameListToString(joinName), "float8")));
/* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE);

View File

@ -496,7 +496,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
/* Must own relation. */
if (pg_class_ownercheck(relid, GetUserId()))
noperm = false; /* user is allowed to modify this policy */
noperm = false; /* user is allowed to modify this policy */
else
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
@ -511,15 +511,16 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
*/
if (!noperm && num_roles > 0)
{
int i, j;
int i,
j;
Oid *roles = (Oid *) ARR_DATA_PTR(policy_roles);
Datum *role_oids;
char *qual_value;
Node *qual_expr;
List *qual_parse_rtable = NIL;
List *qual_parse_rtable = NIL;
char *with_check_value;
Node *with_check_qual;
List *with_check_parse_rtable = NIL;
List *with_check_parse_rtable = NIL;
Datum values[Natts_pg_policy];
bool isnull[Natts_pg_policy];
bool replaces[Natts_pg_policy];
@ -536,15 +537,14 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
/*
* All of the dependencies will be removed from the policy and then
* re-added. In order to get them correct, we need to extract out
* the expressions in the policy and construct a parsestate just
* enough to build the range table(s) to then pass to
* recordDependencyOnExpr().
* re-added. In order to get them correct, we need to extract out the
* expressions in the policy and construct a parsestate just enough to
* build the range table(s) to then pass to recordDependencyOnExpr().
*/
/* Get policy qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polqual,
RelationGetDescr(pg_policy_rel), &attr_isnull);
RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull)
{
ParseState *qual_pstate;
@ -566,7 +566,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
/* Get WITH CHECK qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck,
RelationGetDescr(pg_policy_rel), &attr_isnull);
RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull)
{
ParseState *with_check_pstate;
@ -665,7 +665,7 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid classid, Oid policy_id)
heap_close(pg_policy_rel, RowExclusiveLock);
return(noperm || num_roles > 0);
return (noperm || num_roles > 0);
}
/*
@ -996,8 +996,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
/* Get policy command */
polcmd_datum = heap_getattr(policy_tuple, Anum_pg_policy_polcmd,
RelationGetDescr(pg_policy_rel),
&polcmd_isnull);
RelationGetDescr(pg_policy_rel),
&polcmd_isnull);
Assert(!polcmd_isnull);
polcmd = DatumGetChar(polcmd_datum);
@ -1029,15 +1029,15 @@ AlterPolicy(AlterPolicyStmt *stmt)
}
else
{
Oid *roles;
Oid *roles;
Datum roles_datum;
bool attr_isnull;
ArrayType *policy_roles;
/*
* We need to pull the set of roles this policy applies to from
* what's in the catalog, so that we can recreate the dependencies
* correctly for the policy.
* We need to pull the set of roles this policy applies to from what's
* in the catalog, so that we can recreate the dependencies correctly
* for the policy.
*/
roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles,
@ -1065,13 +1065,13 @@ AlterPolicy(AlterPolicyStmt *stmt)
}
else
{
Datum value_datum;
bool attr_isnull;
Datum value_datum;
bool attr_isnull;
/*
* We need to pull the USING expression and build the range table for
* the policy from what's in the catalog, so that we can recreate
* the dependencies correctly for the policy.
* the policy from what's in the catalog, so that we can recreate the
* dependencies correctly for the policy.
*/
/* Check if the policy has a USING expr */
@ -1106,8 +1106,8 @@ AlterPolicy(AlterPolicyStmt *stmt)
}
else
{
Datum value_datum;
bool attr_isnull;
Datum value_datum;
bool attr_isnull;
/*
* We need to pull the WITH CHECK expression and build the range table

View File

@ -114,8 +114,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s",
NameListToString(funcname), "language_handler")));
errmsg("function %s must return type %s",
NameListToString(funcname), "language_handler")));
}
else
{
@ -285,8 +285,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s",
NameListToString(stmt->plhandler), "language_handler")));
errmsg("function %s must return type %s",
NameListToString(stmt->plhandler), "language_handler")));
}
/* validate the inline function */

View File

@ -532,8 +532,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
* NOTE that this is cool only because we have ShareRowExclusiveLock on the
* relation, so the trigger set won't be changing underneath us.
* NOTE that this is cool only because we have ShareRowExclusiveLock on
* the relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
{

View File

@ -450,8 +450,8 @@ DefineType(List *names, List *parameters)
{
/* backwards-compatibility hack */
ereport(WARNING,
(errmsg("changing return type of function %s from %s to %s",
NameListToString(inputName), "opaque", typeName)));
(errmsg("changing return type of function %s from %s to %s",
NameListToString(inputName), "opaque", typeName)));
SetFunctionReturnType(inputOid, typoid);
}
else
@ -467,15 +467,15 @@ DefineType(List *names, List *parameters)
{
/* backwards-compatibility hack */
ereport(WARNING,
(errmsg("changing return type of function %s from %s to %s",
NameListToString(outputName), "opaque", "cstring")));
(errmsg("changing return type of function %s from %s to %s",
NameListToString(outputName), "opaque", "cstring")));
SetFunctionReturnType(outputOid, CSTRINGOID);
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type output function %s must return type %s",
NameListToString(outputName), "cstring")));
errmsg("type output function %s must return type %s",
NameListToString(outputName), "cstring")));
}
if (receiveOid)
{
@ -492,8 +492,8 @@ DefineType(List *names, List *parameters)
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type send function %s must return type %s",
NameListToString(sendName), "bytea")));
errmsg("type send function %s must return type %s",
NameListToString(sendName), "bytea")));
}
/*
@ -1888,8 +1888,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("type analyze function %s must return type %s",
NameListToString(procname), "boolean")));
errmsg("type analyze function %s must return type %s",
NameListToString(procname), "boolean")));
return procOid;
}
@ -3313,9 +3313,9 @@ AlterTypeOwner_oid(Oid typeOid, Oid newOwnerId, bool hasDependEntry)
typTup = (Form_pg_type) GETSTRUCT(tup);
/*
* If it's a composite type, invoke ATExecChangeOwner so that we fix up the
* pg_class entry properly. That will call back to AlterTypeOwnerInternal
* to take care of the pg_type entry(s).
* If it's a composite type, invoke ATExecChangeOwner so that we fix up
* the pg_class entry properly. That will call back to
* AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock);

View File

@ -302,7 +302,7 @@ CreateRole(CreateRoleStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change bypassrls attribute")));
errmsg("must be superuser to change bypassrls attribute")));
}
else
{
@ -320,8 +320,8 @@ CreateRole(CreateRoleStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved",
stmt->role),
errdetail("Role names starting with \"pg_\" are reserved.")));
stmt->role),
errdetail("Role names starting with \"pg_\" are reserved.")));
/*
* Check the pg_authid relation to be certain the role doesn't already
@ -977,7 +977,7 @@ DropRole(DropRoleStmt *stmt)
if (rolspec->roletype != ROLESPEC_CSTRING)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot use special role specifier in DROP ROLE")));
errmsg("cannot use special role specifier in DROP ROLE")));
role = rolspec->rolename;
tuple = SearchSysCache1(AUTHNAME, PointerGetDatum(role));
@ -1167,22 +1167,22 @@ RenameRole(const char *oldname, const char *newname)
errmsg("current user cannot be renamed")));
/*
* Check that the user is not trying to rename a system role and
* not trying to rename a role into the reserved "pg_" namespace.
* Check that the user is not trying to rename a system role and not
* trying to rename a role into the reserved "pg_" namespace.
*/
if (IsReservedName(NameStr(authform->rolname)))
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved",
NameStr(authform->rolname)),
errdetail("Role names starting with \"pg_\" are reserved.")));
NameStr(authform->rolname)),
errdetail("Role names starting with \"pg_\" are reserved.")));
if (IsReservedName(newname))
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved",
newname),
errdetail("Role names starting with \"pg_\" are reserved.")));
newname),
errdetail("Role names starting with \"pg_\" are reserved.")));
/* make sure the new name doesn't exist */
if (SearchSysCacheExists1(AUTHNAME, CStringGetDatum(newname)))

View File

@ -1192,9 +1192,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
}
/*
* If the all-visible page is turned out to be all-frozen but not marked,
* we should so mark it. Note that all_frozen is only valid if all_visible
* is true, so we must check both.
* If the all-visible page turns out to be all-frozen but not
* marked, we should so mark it. Note that all_frozen is only valid
* if all_visible is true, so we must check both.
*/
else if (all_visible_according_to_vm && all_visible && all_frozen &&
!VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
@ -1660,7 +1660,7 @@ should_attempt_truncation(LVRelStats *vacrelstats)
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable > 0 &&
(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
old_snapshot_threshold < 0)
return true;
else

View File

@ -880,9 +880,9 @@ check_role(char **newval, void **extra, GucSource source)
ReleaseSysCache(roleTup);
/*
* Verify that session user is allowed to become this role, but
* skip this in parallel mode, where we must blindly recreate the
* parallel leader's state.
* Verify that session user is allowed to become this role, but skip
* this in parallel mode, where we must blindly recreate the parallel
* leader's state.
*/
if (!InitializingParallelWorker &&
!is_member_of_role(GetSessionUserId(), roleid))

View File

@ -444,10 +444,9 @@ ExecSupportsBackwardScan(Plan *node)
return false;
/*
* Parallel-aware nodes return a subset of the tuples in each worker,
* and in general we can't expect to have enough bookkeeping state to
* know which ones we returned in this worker as opposed to some other
* worker.
* Parallel-aware nodes return a subset of the tuples in each worker, and
* in general we can't expect to have enough bookkeeping state to know
* which ones we returned in this worker as opposed to some other worker.
*/
if (node->parallel_aware)
return false;

View File

@ -725,7 +725,7 @@ retry:
{
TransactionId xwait;
ItemPointerData ctid_wait;
XLTW_Oper reason_wait;
XLTW_Oper reason_wait;
Datum existing_values[INDEX_MAX_KEYS];
bool existing_isnull[INDEX_MAX_KEYS];
char *error_new;

View File

@ -1851,25 +1851,25 @@ ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
wco->polname, wco->relname)));
errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy for table \"%s\"",
wco->relname)));
errmsg("new row violates row-level security policy for table \"%s\"",
wco->relname)));
break;
case WCO_RLS_CONFLICT_CHECK:
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
wco->polname, wco->relname)));
errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
wco->relname)));
errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
wco->relname)));
break;
default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);

View File

@ -83,7 +83,7 @@ struct SharedExecutorInstrumentation
typedef struct ExecParallelEstimateContext
{
ParallelContext *pcxt;
int nnodes;
int nnodes;
} ExecParallelEstimateContext;
/* Context object for ExecParallelInitializeDSM. */
@ -91,7 +91,7 @@ typedef struct ExecParallelInitializeDSMContext
{
ParallelContext *pcxt;
SharedExecutorInstrumentation *instrumentation;
int nnodes;
int nnodes;
} ExecParallelInitializeDSMContext;
/* Helper functions that run in the parallel leader. */
@ -99,11 +99,11 @@ static char *ExecSerializePlan(Plan *plan, EState *estate);
static bool ExecParallelEstimate(PlanState *node,
ExecParallelEstimateContext *e);
static bool ExecParallelInitializeDSM(PlanState *node,
ExecParallelInitializeDSMContext *d);
ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
bool reinitialize);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation);
SharedExecutorInstrumentation *instrumentation);
/* Helper functions that run in the parallel worker. */
static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
@ -387,12 +387,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
/* Estimate space for tuple queues. */
shm_toc_estimate_chunk(&pcxt->estimator,
mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
* Give parallel-aware nodes a chance to add to the estimates, and get
* a count of how many PlanState nodes there are.
* Give parallel-aware nodes a chance to add to the estimates, and get a
* count of how many PlanState nodes there are.
*/
e.pcxt = pcxt;
e.nnodes = 0;
@ -444,14 +444,14 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
/*
* If instrumentation options were supplied, allocate space for the
* data. It only gets partially initialized here; the rest happens
* during ExecParallelInitializeDSM.
* If instrumentation options were supplied, allocate space for the data.
* It only gets partially initialized here; the rest happens during
* ExecParallelInitializeDSM.
*/
if (estate->es_instrument)
{
Instrumentation *instrument;
int i;
int i;
instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
instrumentation->instrument_options = estate->es_instrument;
@ -493,13 +493,13 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
*/
static bool
ExecParallelRetrieveInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation)
SharedExecutorInstrumentation *instrumentation)
{
Instrumentation *instrument;
int i;
int n;
int ibytes;
int plan_node_id = planstate->plan->plan_node_id;
int i;
int n;
int ibytes;
int plan_node_id = planstate->plan->plan_node_id;
/* Find the instrumentation for this node. */
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
@ -532,7 +532,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
void
ExecParallelFinish(ParallelExecutorInfo *pei)
{
int i;
int i;
if (pei->finished)
return;
@ -626,19 +626,19 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
*/
static bool
ExecParallelReportInstrumentation(PlanState *planstate,
SharedExecutorInstrumentation *instrumentation)
SharedExecutorInstrumentation *instrumentation)
{
int i;
int plan_node_id = planstate->plan->plan_node_id;
int i;
int plan_node_id = planstate->plan->plan_node_id;
Instrumentation *instrument;
InstrEndLoop(planstate->instrument);
/*
* If we shuffled the plan_node_id values in ps_instrument into sorted
* order, we could use binary search here. This might matter someday
* if we're pushing down sufficiently large plan trees. For now, do it
* the slow, dumb way.
* order, we could use binary search here. This might matter someday if
* we're pushing down sufficiently large plan trees. For now, do it the
* slow, dumb way.
*/
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id)
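The comment above notes that a binary search would be possible if the plan_node_id values were kept in sorted order. Purely as an illustration of that alternative (hypothetical helper, not part of the patch):

/* Hypothetical: binary search over a sorted plan_node_id[] array. */
static int
find_plan_node_slot(SharedExecutorInstrumentation *instrumentation,
					int plan_node_id)
{
	int			lo = 0;
	int			hi = instrumentation->num_plan_nodes - 1;

	while (lo <= hi)
	{
		int			mid = lo + (hi - lo) / 2;

		if (instrumentation->plan_node_id[mid] == plan_node_id)
			return mid;			/* found the slot for this plan node */
		else if (instrumentation->plan_node_id[mid] < plan_node_id)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return -1;					/* not present */
}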

View File

@ -497,8 +497,8 @@ init_execution_state(List *queryTree_list,
stmt = queryTree->utilityStmt;
else
stmt = (Node *) pg_plan_query(queryTree,
fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
NULL);
fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
NULL);
/* Precheck all commands for validity in a function */
if (IsA(stmt, TransactionStmt))

View File

@ -491,9 +491,9 @@ static void finalize_aggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void finalize_partialaggregate(AggState *aggstate,
AggStatePerAgg peragg,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
AggStatePerAgg peragg,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void prepare_projection_slot(AggState *aggstate,
TupleTableSlot *slot,
int currentSet);
@ -981,17 +981,18 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
if (OidIsValid(pertrans->deserialfn_oid))
{
/*
* Don't call a strict deserialization function with NULL input.
* A strict deserialization function and a null value means we skip
* calling the combine function for this state. We assume that this
* would be a waste of time and effort anyway so just skip it.
* Don't call a strict deserialization function with NULL input. A
* strict deserialization function and a null value means we skip
* calling the combine function for this state. We assume that
* this would be a waste of time and effort anyway so just skip
* it.
*/
if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
continue;
else
{
FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
MemoryContext oldContext;
FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
MemoryContext oldContext;
dsinfo->arg[0] = slot->tts_values[0];
dsinfo->argnull[0] = slot->tts_isnull[0];
@ -1423,14 +1424,14 @@ finalize_partialaggregate(AggState *aggstate,
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull)
{
AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
MemoryContext oldContext;
AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
/*
* serialfn_oid will be set if we must serialize the input state
* before calling the combine function on the state.
* serialfn_oid will be set if we must serialize the input state before
* calling the combine function on the state.
*/
if (OidIsValid(pertrans->serialfn_oid))
{
@ -1443,6 +1444,7 @@ finalize_partialaggregate(AggState *aggstate,
else
{
FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
fcinfo->arg[0] = pergroupstate->transValue;
fcinfo->argnull[0] = pergroupstate->transValueIsNull;
@ -1459,7 +1461,7 @@ finalize_partialaggregate(AggState *aggstate,
/* If result is pass-by-ref, make sure it is in the right context. */
if (!peragg->resulttypeByVal && !*resultIsNull &&
!MemoryContextContains(CurrentMemoryContext,
DatumGetPointer(*resultVal)))
DatumGetPointer(*resultVal)))
*resultVal = datumCopy(*resultVal,
peragg->resulttypeByVal,
peragg->resulttypeLen);
@ -2627,21 +2629,21 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*
* 1. An aggregate function appears more than once in query:
*
* SELECT SUM(x) FROM ... HAVING SUM(x) > 0
* SELECT SUM(x) FROM ... HAVING SUM(x) > 0
*
* Since the aggregates are the identical, we only need to calculate
* the calculate it once. Both aggregates will share the same 'aggno'
* value.
* Since the aggregates are identical, we only need to calculate it
* once. Both aggregates will share the same 'aggno'
* value.
*
* 2. Two different aggregate functions appear in the query, but the
* aggregates have the same transition function and initial value, but
* different final function:
* aggregates have the same transition function and initial value, but
* different final function:
*
* SELECT SUM(x), AVG(x) FROM ...
* SELECT SUM(x), AVG(x) FROM ...
*
* In this case we must create a new peragg for the varying aggregate,
* and need to call the final functions separately, but can share the
* same transition state.
* In this case we must create a new peragg for the varying aggregate,
* and need to call the final functions separately, but can share the
* same transition state.
*
* For either of these optimizations to be valid, the aggregate's
* arguments must be the same, including any modifiers such as ORDER BY,
@ -2889,8 +2891,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
existing_transno = find_compatible_pertrans(aggstate, aggref,
transfn_oid, aggtranstype,
serialfn_oid, deserialfn_oid,
initValue, initValueIsNull,
serialfn_oid, deserialfn_oid,
initValue, initValueIsNull,
same_input_transnos);
if (existing_transno != -1)
{
@ -3366,9 +3368,9 @@ find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
/*
* The serialization and deserialization functions must match, if
* present, as we're unable to share the trans state for aggregates
* which will serialize or deserialize into different formats. Remember
* that these will be InvalidOid if they're not required for this agg
* node.
* which will serialize or deserialize into different formats.
* Remember that these will be InvalidOid if they're not required for
* this agg node.
*/
if (aggserialfn != pertrans->serialfn_oid ||
aggdeserialfn != pertrans->deserialfn_oid)

View File

@ -285,8 +285,8 @@ ExecReScanForeignScan(ForeignScanState *node)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode. outerPlan may also be NULL, in which case there
* is nothing to rescan at all.
* first ExecProcNode. outerPlan may also be NULL, in which case there is
* nothing to rescan at all.
*/
if (outerPlan != NULL && outerPlan->chgParam == NULL)
ExecReScan(outerPlan);

View File

@ -138,8 +138,8 @@ ExecGather(GatherState *node)
/*
* Initialize the parallel context and workers on first execution. We do
* this on first execution rather than during node initialization, as it
* needs to allocate large dynamic segment, so it is better to do if it
* is really needed.
* needs to allocate a large dynamic segment, so it is better to do so
* only if it is really needed.
*/
if (!node->initialized)
{
@ -147,8 +147,8 @@ ExecGather(GatherState *node)
Gather *gather = (Gather *) node->ps.plan;
/*
* Sometimes we might have to run without parallelism; but if
* parallel mode is active then we can try to fire up some workers.
* Sometimes we might have to run without parallelism; but if parallel
* mode is active then we can try to fire up some workers.
*/
if (gather->num_workers > 0 && IsInParallelMode())
{
@ -186,7 +186,7 @@ ExecGather(GatherState *node)
}
else
{
/* No workers? Then never mind. */
/* No workers? Then never mind. */
ExecShutdownGatherWorkers(node);
}
}
@ -314,7 +314,7 @@ gather_getnext(GatherState *gatherstate)
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
int waitpos = gatherstate->nextreader;
int waitpos = gatherstate->nextreader;
for (;;)
{
@ -330,8 +330,8 @@ gather_readnext(GatherState *gatherstate)
tup = TupleQueueReaderNext(reader, true, &readerdone);
/*
* If this reader is done, remove it. If all readers are done,
* clean up remaining worker state.
* If this reader is done, remove it. If all readers are done, clean
* up remaining worker state.
*/
if (readerdone)
{
@ -402,7 +402,7 @@ ExecShutdownGatherWorkers(GatherState *node)
/* Shut down tuple queue readers before shutting down workers. */
if (node->reader != NULL)
{
int i;
int i;
for (i = 0; i < node->nreaders; ++i)
DestroyTupleQueueReader(node->reader[i]);
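The "remove it" step referred to in the gather_readnext() comment above is not visible in these hunks; a rough sketch of compacting a finished reader out of the reader array (field names as used elsewhere in this file, illustration only):

/* Sketch: drop reader 'pos' from the array once it has reported done. */
static void
remove_finished_reader(GatherState *gatherstate, int pos)
{
	DestroyTupleQueueReader(gatherstate->reader[pos]);
	--gatherstate->nreaders;

	/* Close the gap so the remaining readers stay contiguous. */
	memmove(&gatherstate->reader[pos],
			&gatherstate->reader[pos + 1],
			sizeof(TupleQueueReader *) * (gatherstate->nreaders - pos));

	/* Keep the round-robin cursor within range. */
	if (gatherstate->nextreader >= gatherstate->nreaders)
		gatherstate->nextreader = 0;
}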
@ -452,10 +452,10 @@ void
ExecReScanGather(GatherState *node)
{
/*
* Re-initialize the parallel workers to perform rescan of relation.
* We want to gracefully shutdown all the workers so that they
* should be able to propagate any error or other information to master
* backend before dying. Parallel context will be reused for rescan.
* Re-initialize the parallel workers to perform a rescan of the relation.
* We want to gracefully shut down all the workers so that they can
* propagate any error or other information to the master backend before
* dying. The parallel context will be reused for the rescan.
*/
ExecShutdownGatherWorkers(node);

View File

@ -1221,10 +1221,10 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
/*
* Note that it is possible that the target tuple has been modified in
* this session, after the above heap_lock_tuple. We choose to not error
* out in that case, in line with ExecUpdate's treatment of similar
* cases. This can happen if an UPDATE is triggered from within
* ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by
* selecting from a wCTE in the ON CONFLICT's SET.
* out in that case, in line with ExecUpdate's treatment of similar cases.
* This can happen if an UPDATE is triggered from within ExecQual(),
* ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
* wCTE in the ON CONFLICT's SET.
*/
/* Execute UPDATE with projection */
@ -1595,7 +1595,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
/* Initialize the usesFdwDirectModify flag */
resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
node->fdwDirectModifyPlans);
node->fdwDirectModifyPlans);
/*
* Verify result relation is a valid target for the current operation

View File

@ -65,8 +65,8 @@ SeqNext(SeqScanState *node)
if (scandesc == NULL)
{
/*
* We reach here if the scan is not parallel, or if we're executing
* a scan that was intended to be parallel serially.
* We reach here if the scan is not parallel, or if we're executing a
* scan that was intended to be parallel serially.
*/
scandesc = heap_beginscan(node->ss.ss_currentRelation,
estate->es_snapshot,
@ -145,7 +145,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
((SeqScan *) node->ss.ps.plan)->scanrelid,
((SeqScan *) node->ss.ps.plan)->scanrelid,
eflags);
node->ss.ss_currentRelation = currentRelation;
@ -277,8 +277,8 @@ ExecReScanSeqScan(SeqScanState *node)
scan = node->ss.ss_currentScanDesc;
if (scan != NULL)
heap_rescan(scan, /* scan desc */
NULL); /* new scan keys */
heap_rescan(scan, /* scan desc */
NULL); /* new scan keys */
ExecScanReScan((ScanState *) node);
}
@ -316,7 +316,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
ParallelContext *pcxt)
{
EState *estate = node->ss.ps.state;
ParallelHeapScanDesc pscan;
ParallelHeapScanDesc pscan;
pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
heap_parallelscan_initialize(pscan,
@ -336,7 +336,7 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
void
ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc)
{
ParallelHeapScanDesc pscan;
ParallelHeapScanDesc pscan;
pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
node->ss.ss_currentScanDesc =

View File

@ -2220,8 +2220,8 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc,
/* build expression trees using actual argument & result types */
build_aggregate_transfn_expr(inputTypes,
numArguments,
0, /* no ordered-set window functions yet */
false, /* no variadic window functions yet */
0, /* no ordered-set window functions yet */
false, /* no variadic window functions yet */
wfunc->wintype,
wfunc->inputcollid,
transfn_oid,

View File

@ -44,13 +44,13 @@ typedef enum
TQUEUE_REMAP_ARRAY, /* array */
TQUEUE_REMAP_RANGE, /* range */
TQUEUE_REMAP_RECORD /* composite type, named or anonymous */
} RemapClass;
} RemapClass;
typedef struct
{
int natts;
RemapClass mapping[FLEXIBLE_ARRAY_MEMBER];
} RemapInfo;
} RemapInfo;
typedef struct
{
@ -61,13 +61,13 @@ typedef struct
char mode;
TupleDesc tupledesc;
RemapInfo *remapinfo;
} TQueueDestReceiver;
} TQueueDestReceiver;
typedef struct RecordTypemodMap
{
int remotetypmod;
int localtypmod;
} RecordTypemodMap;
} RecordTypemodMap;
struct TupleQueueReader
{
@ -81,19 +81,19 @@ struct TupleQueueReader
#define TUPLE_QUEUE_MODE_CONTROL 'c'
#define TUPLE_QUEUE_MODE_DATA 'd'
static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype,
static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype,
Datum value);
static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value);
static void tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value);
static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc);
static void TupleQueueHandleControlMessage(TupleQueueReader *reader,
Size nbytes, char *data);
static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader,
Size nbytes, HeapTupleHeader data);
static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader,
TupleDesc tupledesc, RemapInfo * remapinfo,
TupleDesc tupledesc, RemapInfo *remapinfo,
HeapTuple tuple);
static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass,
Datum value);
@ -212,7 +212,7 @@ tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
* Invoke the appropriate walker function based on the given RemapClass.
*/
static void
tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value)
{
check_stack_depth();
@ -237,7 +237,7 @@ tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
* contained therein.
*/
static void
tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value)
{
HeapTupleHeader tup;
Oid typeid;
@ -304,7 +304,7 @@ tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
* contained therein.
*/
static void
tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value)
{
ArrayType *arr = DatumGetArrayTypeP(value);
Oid typeid = ARR_ELEMTYPE(arr);
@ -342,7 +342,7 @@ tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
* contained therein.
*/
static void
tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value)
{
RangeType *range = DatumGetRangeType(value);
Oid typeid = RangeTypeGetOid(range);
@ -386,7 +386,7 @@ tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
* already done so previously.
*/
static void
tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc)
{
StringInfoData buf;
@ -613,7 +613,7 @@ TupleQueueHandleDataMessage(TupleQueueReader *reader,
*/
static HeapTuple
TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc,
RemapInfo * remapinfo, HeapTuple tuple)
RemapInfo *remapinfo, HeapTuple tuple)
{
Datum *values;
bool *isnull;

View File

@ -1875,7 +1875,7 @@ CheckPAMAuth(Port *port, char *user, char *password)
retval = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
hostinfo, sizeof(hostinfo), NULL, 0,
port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV);
port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV);
if (retval != 0)
{
ereport(WARNING,
@ -1934,7 +1934,7 @@ CheckPAMAuth(Port *port, char *user, char *password)
{
ereport(LOG,
(errmsg("pam_set_item(PAM_RHOST) failed: %s",
pam_strerror(pamh, retval))));
pam_strerror(pamh, retval))));
pam_passwd = NULL;
return STATUS_ERROR;
}
@ -1996,8 +1996,8 @@ CheckPAMAuth(Port *port, char *user, char *password)
static int
CheckBSDAuth(Port *port, char *user)
{
char *passwd;
int retval;
char *passwd;
int retval;
/* Send regular password request to client, and get the response */
sendAuthRequest(port, AUTH_REQ_PASSWORD);
@ -2539,11 +2539,10 @@ CheckRADIUSAuth(Port *port)
radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier));
/*
* RADIUS password attributes are calculated as:
* e[0] = p[0] XOR MD5(secret + Request Authenticator)
* for the first group of 16 octets, and then:
* e[i] = p[i] XOR MD5(secret + e[i-1])
* for the following ones (if necessary)
* RADIUS password attributes are calculated as: e[0] = p[0] XOR
* MD5(secret + Request Authenticator) for the first group of 16 octets,
* and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones
* (if necessary)
*/
encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH;
cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH);
@ -2554,7 +2553,11 @@ CheckRADIUSAuth(Port *port)
for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH)
{
memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH);
/* .. and for subsequent iterations the result of the previous XOR (calculated below) */
/*
* .. and for subsequent iterations the result of the previous XOR
* (calculated below)
*/
md5trailer = encryptedpassword + i;
if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i))
@ -2565,7 +2568,7 @@ CheckRADIUSAuth(Port *port)
return STATUS_ERROR;
}
for (j = i; j < i+RADIUS_VECTOR_LENGTH; j++)
for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++)
{
if (j < strlen(passwd))
encryptedpassword[j] = passwd[j] ^ encryptedpassword[j];
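For reference, the obfuscation described in the comment above (RFC 2865 User-Password hiding) can be sketched roughly as follows. This is only an illustrative sketch, not PostgreSQL code: radius_encrypt_password() is a hypothetical helper, OpenSSL's MD5() is used for the digest, the shared secret is assumed short enough to fit in the scratch buffer, and the password is assumed already zero-padded to a multiple of 16 octets.

#include <string.h>
#include <openssl/md5.h>

#define RADIUS_VECTOR_LENGTH 16

static void
radius_encrypt_password(const char *secret,
						const unsigned char *request_authenticator,
						const unsigned char *padded_passwd, size_t padded_len,
						unsigned char *encrypted)
{
	unsigned char buf[512];		/* assumes the shared secret fits here */
	unsigned char digest[MD5_DIGEST_LENGTH];
	const unsigned char *trailer = request_authenticator;
	size_t		secretlen = strlen(secret);
	size_t		i,
				j;

	for (i = 0; i < padded_len; i += RADIUS_VECTOR_LENGTH)
	{
		/*
		 * digest = MD5(secret || trailer); trailer is the Request
		 * Authenticator on the first round, e[i-1] afterwards.
		 */
		memcpy(buf, secret, secretlen);
		memcpy(buf + secretlen, trailer, RADIUS_VECTOR_LENGTH);
		MD5(buf, secretlen + RADIUS_VECTOR_LENGTH, digest);

		/* e[i] = p[i] XOR digest, one 16-octet group at a time */
		for (j = 0; j < RADIUS_VECTOR_LENGTH; j++)
			encrypted[i + j] = padded_passwd[i + j] ^ digest[j];

		trailer = encrypted + i;
	}
}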

View File

@ -241,8 +241,8 @@ be_tls_init(void)
(buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO)))
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("private key file \"%s\" has group or world access",
ssl_key_file),
errmsg("private key file \"%s\" has group or world access",
ssl_key_file),
errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root.")));
#endif
@ -316,7 +316,7 @@ be_tls_init(void)
else
ereport(FATAL,
(errmsg("could not load SSL certificate revocation list file \"%s\": %s",
ssl_crl_file, SSLerrmessage(ERR_get_error()))));
ssl_crl_file, SSLerrmessage(ERR_get_error()))));
}
}
@ -377,11 +377,12 @@ be_tls_open_server(Port *port)
port->ssl_in_use = true;
aloop:
/*
* Prepare to call SSL_get_error() by clearing thread's OpenSSL error
* queue. In general, the current thread's error queue must be empty
* before the TLS/SSL I/O operation is attempted, or SSL_get_error()
* will not work reliably. An extension may have failed to clear the
* before the TLS/SSL I/O operation is attempted, or SSL_get_error() will
* not work reliably. An extension may have failed to clear the
* per-thread error queue following another call to an OpenSSL I/O
* routine.
*/
@ -393,12 +394,11 @@ aloop:
/*
* Other clients of OpenSSL in the backend may fail to call
* ERR_get_error(), but we always do, so as to not cause problems
* for OpenSSL clients that don't call ERR_clear_error()
* defensively. Be sure that this happens by calling now.
* SSL_get_error() relies on the OpenSSL per-thread error queue
* being intact, so this is the earliest possible point
* ERR_get_error() may be called.
* ERR_get_error(), but we always do, so as to not cause problems for
* OpenSSL clients that don't call ERR_clear_error() defensively. Be
* sure that this happens by calling now. SSL_get_error() relies on
* the OpenSSL per-thread error queue being intact, so this is the
* earliest possible point ERR_get_error() may be called.
*/
ecode = ERR_get_error();
switch (err)
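The two comments above describe a small protocol around OpenSSL's per-thread error queue: clear it before the TLS I/O call, and drain it immediately afterwards so that SSL_get_error() and the logged error code are reliable. A minimal sketch of that pattern, assuming an already-configured SSL handle (the function name here is made up for illustration):

#include <openssl/ssl.h>
#include <openssl/err.h>

static int
accept_with_clean_error_queue(SSL *ssl)
{
	int			r;
	int			err;
	unsigned long ecode;

	ERR_clear_error();			/* queue must be empty before the I/O call */
	r = SSL_accept(ssl);
	if (r <= 0)
	{
		err = SSL_get_error(ssl, r);	/* needs the error queue intact */
		ecode = ERR_get_error();		/* earliest safe point to drain it */

		/* ... report using err/ecode, or retry on WANT_READ/WANT_WRITE ... */
		(void) err;
		(void) ecode;
	}
	return r;
}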

View File

@ -140,26 +140,26 @@ retry:
/* In blocking mode, wait until the socket is ready */
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{
WaitEvent event;
WaitEvent event;
Assert(waitfor);
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/*
* If the postmaster has died, it's not safe to continue running,
* because it is the postmaster's job to kill us if some other backend
 * exits uncleanly. Moreover, we won't run very well in this state;
* helper processes like walwriter and the bgwriter will exit, so
* performance may be poor. Finally, if we don't exit, pg_ctl will
* be unable to restart the postmaster without manual intervention,
* so no new connections can be accepted. Exiting clears the deck
* for a postmaster restart.
* performance may be poor. Finally, if we don't exit, pg_ctl will be
* unable to restart the postmaster without manual intervention, so no
* new connections can be accepted. Exiting clears the deck for a
* postmaster restart.
*
* (Note that we only make this check when we would otherwise sleep
* on our latch. We might still continue running for a while if the
* (Note that we only make this check when we would otherwise sleep on
* our latch. We might still continue running for a while if the
* postmaster is killed in mid-query, or even through multiple queries
* if we never have to wait for read. We don't want to burn too many
* cycles checking for this very rare condition, and this should cause
@ -168,7 +168,7 @@ retry:
if (event.events & WL_POSTMASTER_DEATH)
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("terminating connection due to unexpected postmaster exit")));
errmsg("terminating connection due to unexpected postmaster exit")));
/* Handle interrupt. */
if (event.events & WL_LATCH_SET)
@ -241,19 +241,19 @@ retry:
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{
WaitEvent event;
WaitEvent event;
Assert(waitfor);
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1);
WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/* See comments in secure_read. */
if (event.events & WL_POSTMASTER_DEATH)
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
errmsg("terminating connection due to unexpected postmaster exit")));
errmsg("terminating connection due to unexpected postmaster exit")));
/* Handle interrupt. */
if (event.events & WL_LATCH_SET)

View File

@ -1174,7 +1174,7 @@ pq_startmsgread(void)
if (PqCommReadingMsg)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("terminating connection because protocol synchronization was lost")));
errmsg("terminating connection because protocol synchronization was lost")));
PqCommReadingMsg = true;
}

View File

@ -143,9 +143,9 @@ mq_putmessage(char msgtype, const char *s, size_t len)
/*
* If the message queue is already gone, just ignore the message. This
* doesn't necessarily indicate a problem; for example, DEBUG messages
* can be generated late in the shutdown sequence, after all DSMs have
* already been detached.
* doesn't necessarily indicate a problem; for example, DEBUG messages can
* be generated late in the shutdown sequence, after all DSMs have already
* been detached.
*/
if (pq_mq == NULL)
return 0;

View File

@ -270,19 +270,22 @@ startup_hacks(const char *progname)
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
#if defined(_M_AMD64) && _MSC_VER == 1800
/*
* Avoid crashing in certain floating-point operations if
* we were compiled for x64 with MS Visual Studio 2013 and
* are running on Windows prior to 7/2008R2 SP1 on an
* AVX2-capable CPU.
* Avoid crashing in certain floating-point operations if we were
* compiled for x64 with MS Visual Studio 2013 and are running on
* Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU.
*
* Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions
* Ref:
* https://connect.microsoft.com/VisualStudio/feedback/details/811093/v
* isual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instruction
* s
*/
if (!IsWindows7SP1OrGreater())
{
_set_FMA3_enable(0);
}
#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */
#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */
}
#endif /* WIN32 */

View File

@ -3499,7 +3499,7 @@ planstate_tree_walker(PlanState *planstate,
return true;
break;
case T_CustomScan:
foreach (lc, ((CustomScanState *) planstate)->custom_ps)
foreach(lc, ((CustomScanState *) planstate)->custom_ps)
{
if (walker((PlanState *) lfirst(lc), context))
return true;

View File

@ -94,8 +94,8 @@ copyParamList(ParamListInfo from)
Size
EstimateParamListSpace(ParamListInfo paramLI)
{
int i;
Size sz = sizeof(int);
int i;
Size sz = sizeof(int);
if (paramLI == NULL || paramLI->numParams <= 0)
return sz;
@ -119,7 +119,7 @@ EstimateParamListSpace(ParamListInfo paramLI)
typeOid = prm->ptype;
}
sz = add_size(sz, sizeof(Oid)); /* space for type OID */
sz = add_size(sz, sizeof(Oid)); /* space for type OID */
sz = add_size(sz, sizeof(uint16)); /* space for pflags */
/* space for datum/isnull */
@ -132,7 +132,7 @@ EstimateParamListSpace(ParamListInfo paramLI)
typByVal = true;
}
sz = add_size(sz,
datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen));
datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen));
}
return sz;

View File

@ -1836,8 +1836,8 @@ _readCustomScan(void)
READ_BITMAPSET_FIELD(custom_relids);
/* Lookup CustomScanMethods by CustomName */
token = pg_strtok(&length); /* skip methods: */
token = pg_strtok(&length); /* CustomName */
token = pg_strtok(&length); /* skip methods: */
token = pg_strtok(&length); /* CustomName */
custom_name = nullable_string(token, length);
methods = GetCustomScanMethods(custom_name, false);
local_node->methods = methods;
@ -2227,11 +2227,12 @@ _readExtensibleNode(void)
{
const ExtensibleNodeMethods *methods;
ExtensibleNode *local_node;
const char *extnodename;
const char *extnodename;
READ_TEMP_LOCALS();
token = pg_strtok(&length); /* skip: extnodename */
token = pg_strtok(&length); /* get extnodename */
token = pg_strtok(&length); /* skip: extnodename */
token = pg_strtok(&length); /* get extnodename */
extnodename = nullable_string(token, length);
if (!extnodename)

View File

@ -163,8 +163,8 @@ make_one_rel(PlannerInfo *root, List *joinlist)
set_base_rel_consider_startup(root);
/*
* Generate access paths for the base rels. set_base_rel_sizes also
* sets the consider_parallel flag for each baserel, if appropriate.
* Generate access paths for the base rels. set_base_rel_sizes also sets
* the consider_parallel flag for each baserel, if appropriate.
*/
set_base_rel_sizes(root);
set_base_rel_pathlists(root);
@ -228,7 +228,7 @@ set_base_rel_consider_startup(PlannerInfo *root)
/*
* set_base_rel_sizes
* Set the size estimates (rows and widths) for each base-relation entry.
* Also determine whether to consider parallel paths for base relations.
* Also determine whether to consider parallel paths for base relations.
*
* We do this in a separate pass over the base rels so that rowcount
* estimates are available for parameterized path generation, and also so
@ -509,6 +509,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
switch (rte->rtekind)
{
case RTE_RELATION:
/*
* Currently, parallel workers can't access the leader's temporary
 * tables. We could possibly relax this if we wrote all of its
@ -528,7 +529,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
*/
if (rte->tablesample != NULL)
{
Oid proparallel = func_parallel(rte->tablesample->tsmhandler);
Oid proparallel = func_parallel(rte->tablesample->tsmhandler);
if (proparallel != PROPARALLEL_SAFE)
return;
@ -557,14 +558,15 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break;
case RTE_SUBQUERY:
/*
* Subplans currently aren't passed to workers. Even if they
* were, the subplan might be using parallelism internally, and
* we can't support nested Gather nodes at present. Finally,
* we don't have a good way of knowing whether the subplan
* involves any parallel-restricted operations. It would be
* nice to relax this restriction some day, but it's going to
* take a fair amount of work.
* were, the subplan might be using parallelism internally, and we
* can't support nested Gather nodes at present. Finally, we
* don't have a good way of knowing whether the subplan involves
* any parallel-restricted operations. It would be nice to relax
* this restriction some day, but it's going to take a fair amount
* of work.
*/
return;
@ -580,6 +582,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break;
case RTE_VALUES:
/*
* The data for a VALUES clause is stored in the plan tree itself,
* so scanning it in a worker is fine.
@ -587,6 +590,7 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
break;
case RTE_CTE:
/*
* CTE tuplestores aren't shared among parallel workers, so we
* force all CTE scans to happen in the leader. Also, populating
@ -598,8 +602,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
}
/*
* If there's anything in baserestrictinfo that's parallel-restricted,
* we give up on parallelizing access to this relation. We could consider
* If there's anything in baserestrictinfo that's parallel-restricted, we
* give up on parallelizing access to this relation. We could consider
* instead postponing application of the restricted quals until we're
* above all the parallelism in the plan tree, but it's not clear that
* this would be a win in very many cases, and it might be tricky to make
@ -609,8 +613,8 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
return;
/*
* If the relation's outputs are not parallel-safe, we must give up.
* In the common case where the relation only outputs Vars, this check is
* If the relation's outputs are not parallel-safe, we must give up. In
* the common case where the relation only outputs Vars, this check is
* very cheap; otherwise, we have to do more work.
*/
if (rel->reltarget_has_non_vars &&
@ -1251,8 +1255,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
int parallel_workers = 0;
/*
 * Decide on the number of workers to request for this append path. For
 * now, we just use the maximum value from among the members. It
 * Decide on the number of workers to request for this append path.
 * For now, we just use the maximum value from among the members. It
* might be useful to use a higher number if the Append node were
* smart enough to spread out the workers, but it currently isn't.
*/
@ -2160,8 +2164,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* Run generate_gather_paths() for each just-processed joinrel. We
* could not do this earlier because both regular and partial paths
* can get added to a particular joinrel at multiple times within
* join_search_one_level. After that, we're done creating paths
* for the joinrel, so run set_cheapest().
* join_search_one_level. After that, we're done creating paths for
* the joinrel, so run set_cheapest().
*/
foreach(lc, root->join_rel_level[lev])
{

View File

@ -1428,15 +1428,14 @@ create_projection_plan(PlannerInfo *root, ProjectionPath *best_path)
* We might not really need a Result node here. There are several ways
* that this can happen. For example, MergeAppend doesn't project, so we
* would have thought that we needed a projection to attach resjunk sort
* columns to its output ... but create_merge_append_plan might have
* added those same resjunk sort columns to both MergeAppend and its
* children. Alternatively, apply_projection_to_path might have created
* a projection path as the subpath of a Gather node even though the
* subpath was projection-capable. So, if the subpath is capable of
* projection or the desired tlist is the same expression-wise as the
* subplan's, just jam it in there. We'll have charged for a Result that
* doesn't actually appear in the plan, but that's better than having a
* Result we don't need.
* columns to its output ... but create_merge_append_plan might have added
* those same resjunk sort columns to both MergeAppend and its children.
* Alternatively, apply_projection_to_path might have created a projection
* path as the subpath of a Gather node even though the subpath was
* projection-capable. So, if the subpath is capable of projection or the
* desired tlist is the same expression-wise as the subplan's, just jam it
* in there. We'll have charged for a Result that doesn't actually appear
* in the plan, but that's better than having a Result we don't need.
*/
if (is_projection_capable_path(best_path->subpath) ||
tlist_same_exprs(tlist, subplan->targetlist))
@ -3248,8 +3247,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/*
* If a join between foreign relations was pushed down, remember it. The
* push-down safety of the join depends upon the server and user mapping
 * being the same. That can change between planning and execution time, in which
* case the plan should be invalidated.
 * being the same. That can change between planning and execution time, in
* which case the plan should be invalidated.
*/
if (scan_relid == 0)
root->glob->hasForeignJoin = true;
@ -3257,8 +3256,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
/*
* Replace any outer-relation variables with nestloop params in the qual,
* fdw_exprs and fdw_recheck_quals expressions. We do this last so that
* the FDW doesn't have to be involved. (Note that parts of fdw_exprs
* or fdw_recheck_quals could have come from join clauses, so doing this
* the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
* fdw_recheck_quals could have come from join clauses, so doing this
* beforehand on the scan_clauses wouldn't work.) We assume
* fdw_scan_tlist contains no such variables.
*/
@ -3279,8 +3278,8 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
* 0, but there can be no Var with relid 0 in the rel's targetlist or the
* restriction clauses, so we skip this in that case. Note that any such
* columns in base relations that were joined are assumed to be contained
* in fdw_scan_tlist.) This is a bit of a kluge and might go away someday,
* so we intentionally leave it out of the API presented to FDWs.
* in fdw_scan_tlist.) This is a bit of a kluge and might go away
* someday, so we intentionally leave it out of the API presented to FDWs.
*/
scan_plan->fsSystemCol = false;
if (scan_relid > 0)
@ -5899,7 +5898,7 @@ make_gather(List *qptlist,
plan->righttree = NULL;
node->num_workers = nworkers;
node->single_copy = single_copy;
node->invisible = false;
node->invisible = false;
return node;
}

View File

@ -108,10 +108,10 @@ static double get_number_of_groups(PlannerInfo *root,
List *rollup_lists,
List *rollup_groupclauses);
static void set_grouped_rel_consider_parallel(PlannerInfo *root,
RelOptInfo *grouped_rel,
PathTarget *target);
RelOptInfo *grouped_rel,
PathTarget *target);
static Size estimate_hashagg_tablesize(Path *path, AggClauseCosts *agg_costs,
double dNumGroups);
double dNumGroups);
static RelOptInfo *create_grouping_paths(PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
@ -141,7 +141,7 @@ static RelOptInfo *create_ordered_paths(PlannerInfo *root,
static PathTarget *make_group_input_target(PlannerInfo *root,
PathTarget *final_target);
static PathTarget *make_partialgroup_input_target(PlannerInfo *root,
PathTarget *final_target);
PathTarget *final_target);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static PathTarget *make_window_input_target(PlannerInfo *root,
@ -1777,8 +1777,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
* findable from the PlannerInfo struct; anything else the FDW wants
* to know should be obtainable via "root".
*
* Note: CustomScan providers, as well as FDWs that don't want to
* use this hook, can use the create_upper_paths_hook; see below.
* Note: CustomScan providers, as well as FDWs that don't want to use
* this hook, can use the create_upper_paths_hook; see below.
*/
if (current_rel->fdwroutine &&
current_rel->fdwroutine->GetForeignUpperPaths)
@ -3196,8 +3196,8 @@ set_grouped_rel_consider_parallel(PlannerInfo *root, RelOptInfo *grouped_rel,
/*
* All that's left to check now is to make sure all aggregate functions
 * support partial mode. If there are no aggregates then we can skip checking
* that.
 * support partial mode. If there are no aggregates then we can skip
* checking that.
*/
if (!parse->hasAggs)
grouped_rel->consider_parallel = true;
@ -3370,9 +3370,10 @@ create_grouping_paths(PlannerInfo *root,
/*
* Determine whether it's possible to perform sort-based implementations
* of grouping. (Note that if groupClause is empty, grouping_is_sortable()
* is trivially true, and all the pathkeys_contained_in() tests will
* succeed too, so that we'll consider every surviving input path.)
* of grouping. (Note that if groupClause is empty,
* grouping_is_sortable() is trivially true, and all the
* pathkeys_contained_in() tests will succeed too, so that we'll consider
* every surviving input path.)
*/
can_sort = grouping_is_sortable(parse->groupClause);
@ -3408,7 +3409,7 @@ create_grouping_paths(PlannerInfo *root,
*/
if (grouped_rel->consider_parallel)
{
Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
/*
* Build target list for partial aggregate paths. We cannot reuse the
@ -3471,27 +3472,27 @@ create_grouping_paths(PlannerInfo *root,
if (parse->hasAggs)
add_partial_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
create_agg_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
else
add_partial_path(grouped_rel, (Path *)
create_group_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause,
NIL,
dNumPartialGroups));
create_group_path(root,
grouped_rel,
path,
partial_grouping_target,
parse->groupClause,
NIL,
dNumPartialGroups));
}
}
}
@ -3513,18 +3514,18 @@ create_grouping_paths(PlannerInfo *root,
if (hashaggtablesize < work_mem * 1024L)
{
add_partial_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
cheapest_partial_path,
partial_grouping_target,
AGG_HASHED,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
create_agg_path(root,
grouped_rel,
cheapest_partial_path,
partial_grouping_target,
AGG_HASHED,
parse->groupClause,
NIL,
&agg_partial_costs,
dNumPartialGroups,
false,
false,
true));
}
}
}
@ -3616,13 +3617,13 @@ create_grouping_paths(PlannerInfo *root,
/*
* Now generate a complete GroupAgg Path atop of the cheapest partial
* path. We need only bother with the cheapest path here, as the output
* of Gather is never sorted.
* path. We need only bother with the cheapest path here, as the
* output of Gather is never sorted.
*/
if (grouped_rel->partial_pathlist)
{
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
double total_groups = path->rows * path->parallel_workers;
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root,
grouped_rel,
@ -3632,9 +3633,9 @@ create_grouping_paths(PlannerInfo *root,
&total_groups);
/*
* Gather is always unsorted, so we'll need to sort, unless there's
* no GROUP BY clause, in which case there will only be a single
* group.
* Gather is always unsorted, so we'll need to sort, unless
* there's no GROUP BY clause, in which case there will only be a
* single group.
*/
if (parse->groupClause)
path = (Path *) create_sort_path(root,
@ -3645,27 +3646,27 @@ create_grouping_paths(PlannerInfo *root,
if (parse->hasAggs)
add_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
path,
target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
create_agg_path(root,
grouped_rel,
path,
target,
parse->groupClause ? AGG_SORTED : AGG_PLAIN,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
else
add_path(grouped_rel, (Path *)
create_group_path(root,
grouped_rel,
path,
target,
parse->groupClause,
(List *) parse->havingQual,
dNumGroups));
create_group_path(root,
grouped_rel,
path,
target,
parse->groupClause,
(List *) parse->havingQual,
dNumGroups));
}
}
@ -3678,15 +3679,15 @@ create_grouping_paths(PlannerInfo *root,
/*
* Provided that the estimated size of the hashtable does not exceed
* work_mem, we'll generate a HashAgg Path, although if we were unable
* to sort above, then we'd better generate a Path, so that we at least
* have one.
* to sort above, then we'd better generate a Path, so that we at
* least have one.
*/
if (hashaggtablesize < work_mem * 1024L ||
grouped_rel->pathlist == NIL)
{
/*
* We just need an Agg over the cheapest-total input path, since input
* order won't matter.
* We just need an Agg over the cheapest-total input path, since
* input order won't matter.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root, grouped_rel,
@ -3704,12 +3705,12 @@ create_grouping_paths(PlannerInfo *root,
/*
* Generate a HashAgg Path atop of the cheapest partial path. Once
* again, we'll only do this if it looks as though the hash table won't
* exceed work_mem.
* again, we'll only do this if it looks as though the hash table
* won't exceed work_mem.
*/
if (grouped_rel->partial_pathlist)
{
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
hashaggtablesize = estimate_hashagg_tablesize(path,
&agg_final_costs,
@ -3717,7 +3718,7 @@ create_grouping_paths(PlannerInfo *root,
if (hashaggtablesize < work_mem * 1024L)
{
double total_groups = path->rows * path->parallel_workers;
double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root,
grouped_rel,
@ -3727,18 +3728,18 @@ create_grouping_paths(PlannerInfo *root,
&total_groups);
add_path(grouped_rel, (Path *)
create_agg_path(root,
grouped_rel,
path,
target,
AGG_HASHED,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
create_agg_path(root,
grouped_rel,
path,
target,
AGG_HASHED,
parse->groupClause,
(List *) parse->havingQual,
&agg_final_costs,
dNumGroups,
true,
true,
true));
}
}
}

View File

@ -2100,6 +2100,7 @@ search_indexed_tlist_for_partial_aggref(Aggref *aggref, indexed_tlist *itlist,
continue;
if (aggref->aggvariadic != tlistaggref->aggvariadic)
continue;
/*
* it would be harmless to compare aggcombine and aggpartial, but
* it's also unnecessary

View File

@ -101,7 +101,7 @@ typedef struct
} has_parallel_hazard_arg;
static bool aggregates_allow_partial_walker(Node *node,
partial_agg_context *context);
partial_agg_context *context);
static bool contain_agg_clause_walker(Node *node, void *context);
static bool count_agg_clauses_walker(Node *node,
count_agg_clauses_context *context);
@ -112,9 +112,9 @@ static bool contain_mutable_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context);
static bool has_parallel_hazard_walker(Node *node,
has_parallel_hazard_arg *context);
has_parallel_hazard_arg *context);
static bool parallel_too_dangerous(char proparallel,
has_parallel_hazard_arg *context);
has_parallel_hazard_arg *context);
static bool typeid_is_temp(Oid typeid);
static bool contain_nonstrict_functions_walker(Node *node, void *context);
static bool contain_leaked_vars_walker(Node *node, void *context);
@ -446,7 +446,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
if (aggref->aggdistinct || aggref->aggorder)
{
context->allowedtype = PAT_DISABLED;
return true; /* abort search */
return true; /* abort search */
}
aggTuple = SearchSysCache1(AGGFNOID,
ObjectIdGetDatum(aggref->aggfnoid));
@ -463,7 +463,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
{
ReleaseSysCache(aggTuple);
context->allowedtype = PAT_DISABLED;
return true; /* abort search */
return true; /* abort search */
}
/*
@ -479,7 +479,7 @@ aggregates_allow_partial_walker(Node *node, partial_agg_context *context)
context->allowedtype = PAT_INTERNAL_ONLY;
ReleaseSysCache(aggTuple);
return false; /* continue searching */
return false; /* continue searching */
}
return expression_tree_walker(node, aggregates_allow_partial_walker,
(void *) context);
@ -1354,7 +1354,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context)
bool
has_parallel_hazard(Node *node, bool allow_restricted)
{
has_parallel_hazard_arg context;
has_parallel_hazard_arg context;
context.allow_restricted = allow_restricted;
return has_parallel_hazard_walker(node, &context);
@ -1371,16 +1371,16 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
 * recurse through Query objects so as to locate parallel-unsafe
* constructs anywhere in the tree.
*
* Later, we'll be called again for specific quals, possibly after
 * some planning has been done, and we may encounter SubPlan, SubLink,
* or AlternativeSubLink nodes. Currently, there's no need to recurse
* through these; they can't be unsafe, since we've already cleared
* the entire query of unsafe operations, and they're definitely
* Later, we'll be called again for specific quals, possibly after some
 * planning has been done, and we may encounter SubPlan, SubLink, or
* AlternativeSubLink nodes. Currently, there's no need to recurse
* through these; they can't be unsafe, since we've already cleared the
* entire query of unsafe operations, and they're definitely
* parallel-restricted.
*/
if (IsA(node, Query))
{
Query *query = (Query *) node;
Query *query = (Query *) node;
if (query->rowMarks != NULL)
return true;
@ -1390,12 +1390,12 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
has_parallel_hazard_walker,
context, 0);
}
else if (IsA(node, SubPlan) || IsA(node, SubLink) ||
IsA(node, AlternativeSubPlan) || IsA(node, Param))
else if (IsA(node, SubPlan) ||IsA(node, SubLink) ||
IsA(node, AlternativeSubPlan) ||IsA(node, Param))
{
/*
* Since we don't have the ability to push subplans down to workers
* at present, we treat subplan references as parallel-restricted.
* Since we don't have the ability to push subplans down to workers at
* present, we treat subplan references as parallel-restricted.
*/
if (!context->allow_restricted)
return true;
@ -1405,12 +1405,14 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
if (IsA(node, RestrictInfo))
{
RestrictInfo *rinfo = (RestrictInfo *) node;
return has_parallel_hazard_walker((Node *) rinfo->clause, context);
}
/*
* It is an error for a parallel worker to touch a temporary table in any
* way, so we can't handle nodes whose type is the rowtype of such a table.
* way, so we can't handle nodes whose type is the rowtype of such a
* table.
*/
if (!context->allow_restricted)
{
@ -1534,7 +1536,8 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
foreach(opid, rcexpr->opnos)
{
Oid opfuncid = get_opcode(lfirst_oid(opid));
Oid opfuncid = get_opcode(lfirst_oid(opid));
if (parallel_too_dangerous(func_parallel(opfuncid), context))
return true;
}
@ -1558,7 +1561,7 @@ parallel_too_dangerous(char proparallel, has_parallel_hazard_arg *context)
static bool
typeid_is_temp(Oid typeid)
{
Oid relid = get_typ_typrelid(typeid);
Oid relid = get_typ_typrelid(typeid);
if (!OidIsValid(relid))
return false;
@ -1870,8 +1873,8 @@ contain_leaked_vars_walker(Node *node, void *context)
/*
* WHERE CURRENT OF doesn't contain function calls. Moreover, it
* is important that this can be pushed down into a
* security_barrier view, since the planner must always generate
* a TID scan when CURRENT OF is present -- c.f. cost_tidscan.
* security_barrier view, since the planner must always generate a
* TID scan when CURRENT OF is present -- c.f. cost_tidscan.
*/
return false;

View File

@ -709,7 +709,7 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel,
AttrNumber natt;
Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */
Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */
int nplain = 0; /* # plain attrs observed */
int nplain = 0; /* # plain attrs observed */
/*
* If inference specification element lacks collation/opclass, then no

View File

@ -107,7 +107,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind)
rel->consider_startup = (root->tuple_fraction > 0);
rel->consider_param_startup = false; /* might get changed later */
rel->consider_parallel = false; /* might get changed later */
rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
rel->reltarget = create_empty_pathtarget();
rel->reltarget_has_non_vars = false;
rel->pathlist = NIL;

View File

@ -776,11 +776,11 @@ apply_pathtarget_labeling_to_tlist(List *tlist, PathTarget *target)
void
apply_partialaggref_adjustment(PathTarget *target)
{
ListCell *lc;
ListCell *lc;
foreach(lc, target->exprs)
{
Aggref *aggref = (Aggref *) lfirst(lc);
Aggref *aggref = (Aggref *) lfirst(lc);
if (IsA(aggref, Aggref))
{

View File

@ -3083,8 +3083,8 @@ errorMissingColumn(ParseState *pstate,
errmsg("column %s.%s does not exist", relname, colname) :
errmsg("column \"%s\" does not exist", colname),
state->rfirst ? closestfirst ?
errhint("Perhaps you meant to reference the column \"%s.%s\".",
state->rfirst->eref->aliasname, closestfirst) :
errhint("Perhaps you meant to reference the column \"%s.%s\".",
state->rfirst->eref->aliasname, closestfirst) :
errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.",
colname, state->rfirst->eref->aliasname) : 0,
parser_errposition(pstate, location)));

Some files were not shown because too many files have changed in this diff.