Fix inconsistencies and typos in the tree

This is numbered take 7, and addresses a set of issues with code
comments, variable names and unreferenced variables.

Author: Alexander Lakhin
Discussion: https://postgr.es/m/dff75442-2468-f74f-568c-6006e141062f@gmail.com
Committed by Michael Paquier, 2019-07-22 10:01:50 +09:00
parent e1a0f6a983
commit 23bccc823d
44 changed files with 49 additions and 65 deletions


@@ -1,4 +1,4 @@
-/* contrib/json_plperl/jsonb_plperl--1.0.sql */
+/* contrib/jsonb_plperl/jsonb_plperlu--1.0.sql */
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION jsonb_plperlu" to load this file. \quit


@@ -311,7 +311,7 @@ pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src)
{
return pullf_create(res, &decompress_filter, ctx, src);
}
-#else /* !HAVE_ZLIB */
+#else /* !HAVE_LIBZ */
int
pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst)


@@ -252,7 +252,7 @@
C library, processor, memory information, and so on. In most
cases it is sufficient to report the vendor and version, but do
not assume everyone knows what exactly <quote>Debian</quote>
-contains or that everyone runs on i386s. If you have
+contains or that everyone runs on x86_64. If you have
installation problems then information about the toolchain on
your machine (compiler, <application>make</application>, and so
on) is also necessary.


@@ -69,7 +69,7 @@
* currently executing.
*
* Fillfactor can be set because it applies only to subsequent changes made to
-* data blocks, as documented in heapio.c
+* data blocks, as documented in hio.c
*
* n_distinct options can be set at ShareUpdateExclusiveLock because they
* are only used during ANALYZE, which uses a ShareUpdateExclusiveLock,


@@ -92,7 +92,7 @@ typedef struct
/*
* The following fields represent the items in this segment. If 'items' is
-* not NULL, it contains a palloc'd array of the itemsin this segment. If
+* not NULL, it contains a palloc'd array of the items in this segment. If
* 'seg' is not NULL, it contains the items in an already-compressed
* format. It can point to an on-disk page (!modified), or a palloc'd
* segment in memory. If both are set, they must represent the same items.


@@ -663,7 +663,7 @@ gistgettuple(IndexScanDesc scan, ScanDirection dir)
}
/*
-* Check the last returned tuple and add it to killitems if
+* Check the last returned tuple and add it to killedItems if
* necessary
*/
if (scan->kill_prior_tuple


@@ -120,7 +120,7 @@ gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
}
/*
-* make plain IndexTupleVector
+* make plain IndexTuple vector
*/
IndexTupleData *


@@ -793,7 +793,7 @@ _hash_initbitmapbuffer(Buffer buf, uint16 bmsize, bool initpage)
* be confused into returning the same tuple more than once or some tuples
* not at all by the rearrangement we are performing here. To prevent
* any concurrent scan to cross the squeeze scan we use lock chaining
-* similar to hasbucketcleanup. Refer comments atop hashbucketcleanup.
+* similar to hashbucketcleanup. Refer comments atop hashbucketcleanup.
*
* We need to retain a pin on the primary bucket to ensure that no concurrent
* split can start.


@@ -509,7 +509,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
* total number of buckets which has to be allocated before using its
-* _hashm_spare element. However always force at least 2 bucket pages. The
+* hashm_spares element. However always force at least 2 bucket pages. The
* upper limit is determined by considerations explained in
* _hash_expandtable().
*/


@@ -102,7 +102,7 @@ static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 in
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
uint16 infomask, Relation rel, int *remaining);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
-static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified,
+static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed,
bool *copy);


@@ -256,7 +256,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
MarkBufferDirty(buffer);
/*
-* Emit a WAL HEAP_CLEAN record showing what we did
+* Emit a WAL XLOG_HEAP2_CLEAN record showing what we did
*/
if (RelationNeedsWAL(relation))
{


@@ -557,8 +557,8 @@ systable_endscan(SysScanDesc sysscan)
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
-* wrappers around index_beginscan/index_getnext. The main reason for their
-* existence is to centralize possible future support of lossy operators
+* wrappers around index_beginscan/index_getnext_slot. The main reason for
+* their existence is to centralize possible future support of lossy operators
* in catalog scans.
*/
SysScanDesc


@@ -643,7 +643,7 @@ spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
continue;
/*
-* Use infinity distances if innerConsistent() failed to return
+* Use infinity distances if innerConsistentFn() failed to return
* them or if is a NULL item (their distances are really unused).
*/
distances = out.distances ? out.distances[i] : so->infDistances;


@@ -891,7 +891,7 @@ ExtendCLOG(TransactionId newestXact)
* Remove all CLOG segments before the one holding the passed transaction ID
*
* Before removing any CLOG data, we must flush XLOG to disk, to ensure
-* that any recently-emitted HEAP_FREEZE records have reached disk; otherwise
+* that any recently-emitted FREEZE_PAGE records have reached disk; otherwise
* a crash and restart might leave us with some unfrozen tuples referencing
* removed CLOG data. We choose to emit a special TRUNCATE XLOG record too.
* Replaying the deletion from XLOG is not critical, since the files could


@@ -9158,7 +9158,7 @@ CreateRestartPoint(int flags)
/*
* Update pg_control, using current time. Check that it still shows
-* IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
+* DB_IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
* this is a quick hack to make sure nothing really bad happens if somehow
* we get here after the end-of-recovery checkpoint.
*/


@@ -775,7 +775,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
NULL,
true, /* islocal */
0, /* inhcount */
-true, /* isnoinherit */
+true, /* noinherit */
isInternal); /* is_internal */
}


@@ -2988,8 +2988,8 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans,
numTransArgs = pertrans->numTransInputs + 1;
/*
-* Set up infrastructure for calling the transfn. Note that invtrans
-* is not needed here.
+* Set up infrastructure for calling the transfn. Note that
+* invtransfn is not needed here.
*/
build_aggregate_transfn_expr(inputTypes,
numArguments,


@@ -2369,7 +2369,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
/*
* The last canSetTag query sets the status values returned to the
* caller. Be careful to free any tuptables not returned, to
-* avoid intratransaction memory leak.
+* avoid intra-transaction memory leak.
*/
if (canSetTag)
{


@@ -2835,7 +2835,7 @@ satisfies_hash_partition(PG_FUNCTION_ARGS)
PartitionKey key;
int j;
-/* Open parent relation and fetch partition keyinfo */
+/* Open parent relation and fetch partition key info */
parent = try_relation_open(parentId, AccessShareLock);
if (parent == NULL)
PG_RETURN_NULL();


@@ -127,7 +127,6 @@ PGSemaphoreReset(PGSemaphore sema)
* PGSemaphoreLock
*
* Lock a semaphore (decrement count), blocking if count would be < 0.
-* Serve the interrupt if interruptOK is true.
*/
void
PGSemaphoreLock(PGSemaphore sema)


@@ -3130,7 +3130,7 @@ DisplayXidCache(void)
/* ----------------------------------------------
-* KnownAssignedTransactions sub-module
+* KnownAssignedTransactionIds sub-module
* ----------------------------------------------
*/


@@ -1855,7 +1855,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
/*
* Was this an "ISO time" with embedded field labels? An
-* example is "h04m05s06" - thomas 2001-02-04
+* example is "h04mm05s06" - thomas 2001-02-04
*/
if (ptype != 0)
{


@@ -510,7 +510,7 @@ fillJsonbValue(JsonbContainer *container, int index,
* "raw scalar" pseudo array to append it - the actual scalar should be passed
* next and it will be added as the only member of the array.
*
-* Values of type jvbBinary, which are rolled up arrays and objects,
+* Values of type jbvBinary, which are rolled up arrays and objects,
* are unpacked before being added to the result.
*/
JsonbValue *


@@ -308,7 +308,7 @@ oidvectorsend(PG_FUNCTION_ARGS)
}
/*
-* oidparse - get OID from IConst/FConst node
+* oidparse - get OID from ICONST/FCONST node
*/
Oid
oidparse(Node *node)


@@ -1152,7 +1152,7 @@ tsvector_concat(PG_FUNCTION_ARGS)
/*
* Compare two strings by tsvector rules.
*
-* if isPrefix = true then it returns zero value iff b has prefix a
+* if prefix = true then it returns zero value iff b has prefix a
*/
int32
tsCompareString(char *a, int lena, char *b, int lenb, bool prefix)
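
As an aside, the rule the corrected comment states can be illustrated with a small stand-alone sketch. This is not the tsvector_op.c code, and the function name below is made up; only the zero/non-zero behaviour in prefix mode mirrors the documented contract.

#include <string.h>

/*
 * Sketch: with prefix = true, a return value of 0 means that string b
 * starts with string a; otherwise an ordinary length-aware byte
 * comparison is performed.
 */
static int
compare_string_sketch(const char *a, int lena, const char *b, int lenb,
                      int prefix)
{
    int     cmp;

    if (prefix && lena > lenb)
        return 1;               /* a longer a can never be a prefix of b */

    cmp = memcmp(a, b, lena < lenb ? lena : lenb);
    if (cmp != 0 || prefix || lena == lenb)
        return cmp;
    return (lena < lenb) ? -1 : 1;      /* shorter string sorts first */
}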


@@ -217,7 +217,7 @@ fmgr_info_cxt_security(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt,
/*
* For an ordinary builtin function, we should never get here
-* because the isbuiltin() search above will have succeeded.
+* because the fmgr_isbuiltin() search above will have succeeded.
* However, if the user has done a CREATE FUNCTION to create an
* alias for a builtin function, we can end up here. In that case
* we have to look up the function by name. The name of the


@@ -1093,7 +1093,7 @@ AtEOXact_Snapshot(bool isCommit, bool resetXmin)
* prevent a warning below.
*
* As with the FirstXactSnapshot, we don't need to free resources of
-* the snapshot iself as it will go away with the memory context.
+* the snapshot itself as it will go away with the memory context.
*/
foreach(lc, exportedSnapshots)
{


@@ -277,7 +277,7 @@ bytesToHex(uint8 b[16], char *s)
*
* OUTPUT hexsum the MD5 sum as a '\0'-terminated string of
* hexadecimal digits. an MD5 sum is 16 bytes long.
-* each byte is represented by two heaxadecimal
+* each byte is represented by two hexadecimal
* characters. you thus need to provide an array
* of 33 characters, including the trailing '\0'.
*
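
For illustration only, the 33-character buffer requirement spelled out in that comment (16 bytes, two hexadecimal characters each, plus the terminating '\0') corresponds to a conversion like the following sketch; this is not the bytesToHex() named in the hunk header, just a stand-alone equivalent.

/*
 * Sketch: render 16 raw MD5 bytes as 32 lowercase hex digits plus a
 * trailing '\0', so the caller must supply a 33-byte buffer.
 */
static void
md5_bytes_to_hex_sketch(const unsigned char b[16], char s[33])
{
    static const char *hex = "0123456789abcdef";
    int     i;

    for (i = 0; i < 16; i++)
    {
        s[i * 2] = hex[(b[i] >> 4) & 0x0F];
        s[i * 2 + 1] = hex[b[i] & 0x0F];
    }
    s[32] = '\0';
}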


@@ -171,9 +171,6 @@ typedef struct GinMetaPageData
GinItemPointerGetBlockNumber(p) == (BlockNumber)0)
#define ItemPointerSetMax(p) \
ItemPointerSet((p), InvalidBlockNumber, (OffsetNumber)0xffff)
-#define ItemPointerIsMax(p) \
-(GinItemPointerGetOffsetNumber(p) == (OffsetNumber)0xffff && \
-GinItemPointerGetBlockNumber(p) == InvalidBlockNumber)
#define ItemPointerSetLossyPage(p, b) \
ItemPointerSet((p), (b), (OffsetNumber)0xffff)
#define ItemPointerIsLossyPage(p) \


@@ -127,7 +127,7 @@ typedef struct ginxlogSplit
/*
* Vacuum simply WAL-logs the whole page, when anything is modified. This
-* is functionally identical to heap_newpage records, but is kept separate for
+* is functionally identical to XLOG_FPI records, but is kept separate for
* debugging purposes. (When inspecting the WAL stream, it's easier to see
* what's going on when GIN vacuum records are marked as such, not as heap
* records.) This is currently only used for entry tree leaf pages.


@@ -195,14 +195,14 @@ typedef struct xl_multi_insert_tuple
*
* Backup blk 0: new page
*
-* If XLOG_HEAP_PREFIX_FROM_OLD or XLOG_HEAP_SUFFIX_FROM_OLD flags are set,
+* If XLH_UPDATE_PREFIX_FROM_OLD or XLH_UPDATE_SUFFIX_FROM_OLD flags are set,
* the prefix and/or suffix come first, as one or two uint16s.
*
* After that, xl_heap_header and new tuple data follow. The new tuple
* data doesn't include the prefix and suffix, which are copied from the
* old tuple on replay.
*
-* If HEAP_CONTAINS_NEW_TUPLE_DATA flag is given, the tuple data is
+* If XLH_UPDATE_CONTAINS_NEW_TUPLE flag is given, the tuple data is
* included even if a full-page image was taken.
*
* Backup blk 1: old page, if different. (no data, just a reference to the blk)
@@ -217,8 +217,8 @@ typedef struct xl_heap_update
OffsetNumber new_offnum; /* new tuple's offset */
/*
-* If XLOG_HEAP_CONTAINS_OLD_TUPLE or XLOG_HEAP_CONTAINS_OLD_KEY flags are
-* set, a xl_heap_header struct and tuple data for the old tuple follows.
+* If XLH_UPDATE_CONTAINS_OLD_TUPLE or XLH_UPDATE_CONTAINS_OLD_KEY flags
+* are set, xl_heap_header and tuple data for the old tuple follow.
*/
} xl_heap_update;


@@ -328,7 +328,7 @@ typedef struct SpGistLeafTupleData
{
unsigned int tupstate:2, /* LIVE/REDIRECT/DEAD/PLACEHOLDER */
size:30; /* large enough for any palloc'able value */
-OffsetNumber nextOffset; /* next tuple in chain, or InvalidOffset */
+OffsetNumber nextOffset; /* next tuple in chain, or InvalidOffsetNumber */
ItemPointerData heapPtr; /* TID of represented heap tuple */
/* leaf datum follows */
} SpGistLeafTupleData;


@@ -283,8 +283,8 @@ typedef struct xl_xact_abort
/* xl_xact_xinfo follows if XLOG_XACT_HAS_INFO */
/* xl_xact_dbinfo follows if XINFO_HAS_DBINFO */
-/* xl_xact_subxacts follows if HAS_SUBXACT */
-/* xl_xact_relfilenodes follows if HAS_RELFILENODES */
+/* xl_xact_subxacts follows if XINFO_HAS_SUBXACT */
+/* xl_xact_relfilenodes follows if XINFO_HAS_RELFILENODES */
/* No invalidation messages needed. */
/* xl_xact_twophase follows if XINFO_HAS_TWOPHASE */
/* twophase_gid follows if XINFO_HAS_GID. As a null-terminated string. */


@@ -300,7 +300,7 @@ typedef struct AggStatePerHashData
int numhashGrpCols; /* number of columns in hash table */
int largestGrpColIdx; /* largest col required for hashing */
AttrNumber *hashGrpColIdxInput; /* hash col indices in input slot */
-AttrNumber *hashGrpColIdxHash; /* indices in hashtbl tuples */
+AttrNumber *hashGrpColIdxHash; /* indices in hash table tuples */
Agg *aggnode; /* original Agg node, for numGroups etc. */
} AggStatePerHashData;


@@ -544,7 +544,7 @@ extern int pg_mbstrlen_with_len(const char *mbstr, int len);
extern int pg_mbcliplen(const char *mbstr, int len, int limit);
extern int pg_encoding_mbcliplen(int encoding, const char *mbstr,
int len, int limit);
-extern int pg_mbcharcliplen(const char *mbstr, int len, int imit);
+extern int pg_mbcharcliplen(const char *mbstr, int len, int limit);
extern int pg_encoding_max_length(int encoding);
extern int pg_database_encoding_max_length(void);
extern mbcharacter_incrementer pg_database_encoding_character_incrementer(void);


@@ -159,15 +159,9 @@
/* Define to 1 if you have the <editline/readline.h> header file. */
/* #undef HAVE_EDITLINE_READLINE_H */
-/* Define to 1 if you have the `fcvt' function. */
-#define HAVE_FCVT 1
/* Define to 1 if you have the `fdatasync' function. */
/* #undef HAVE_FDATASYNC */
-/* Define to 1 if you have finite(). */
-#define HAVE_FINITE 1
/* Define to 1 if you have the `fpclass' function. */
/* #undef HAVE_FPCLASS */
@@ -469,9 +463,6 @@
/* Define to 1 if `__ss_len' is member of `struct sockaddr_storage'. */
/* #undef HAVE_STRUCT_SOCKADDR_STORAGE___SS_LEN */
-/* Define to 1 if the system has the type `struct sockaddr_un'. */
-/* #undef HAVE_STRUCT_SOCKADDR_UN */
/* Define to 1 if `tm_zone' is member of `struct tm'. */
/* #undef HAVE_STRUCT_TM_TM_ZONE */
@@ -481,9 +472,6 @@
/* Define to 1 if you have the `sync_file_range' function. */
/* #undef HAVE_SYNC_FILE_RANGE */
-/* Define to 1 if you have the `sysconf' function. */
-/* #undef HAVE_SYSCONF */
/* Define to 1 if you have the syslog interface. */
/* #undef HAVE_SYSLOG */


@@ -7,7 +7,7 @@
* Portions Copyright (c) 1999-2019, PostgreSQL Global Development Group
*
* The PostgreSQL routines for a DateTime/int/float/numeric formatting,
-* inspire with Oracle TO_CHAR() / TO_DATE() / TO_NUMBER() routines.
+* inspired by the Oracle TO_CHAR() / TO_DATE() / TO_NUMBER() routines.
*
* Karel Zak
*


@@ -367,7 +367,7 @@ extern JsonbValue *findJsonbValueFromContainer(JsonbContainer *sheader,
extern JsonbValue *getIthJsonbValueFromContainer(JsonbContainer *sheader,
uint32 i);
extern JsonbValue *pushJsonbValue(JsonbParseState **pstate,
-JsonbIteratorToken seq, JsonbValue *jbVal);
+JsonbIteratorToken seq, JsonbValue *jbval);
extern JsonbIterator *JsonbIteratorInit(JsonbContainer *container);
extern JsonbIteratorToken JsonbIteratorNext(JsonbIterator **it, JsonbValue *val,
bool skipNested);


@@ -59,7 +59,7 @@ typedef enum IndexAttrBitmapKind
} IndexAttrBitmapKind;
extern Bitmapset *RelationGetIndexAttrBitmap(Relation relation,
-IndexAttrBitmapKind keyAttrs);
+IndexAttrBitmapKind attrKind);
extern void RelationGetExclusionInfo(Relation indexRelation,
Oid **operators,


@@ -2073,9 +2073,9 @@ ecpg_do_prologue(int lineno, const int compat, const int force_indicator,
* offset - offset between ith and (i+1)th entry in an array, normally
* that means sizeof(type)
* ind_type - type of indicator variable
-* ind_value - pointer to indicator variable
+* ind_pointer - pointer to indicator variable
* ind_varcharsize - empty
-* ind_arraysize - arraysize of indicator array
+* ind_arrsize - arraysize of indicator array
* ind_offset - indicator offset
*------
*/


@@ -275,8 +275,8 @@ PGTYPEStimestamp_to_asc(timestamp tstamp)
*tm = &tt;
char buf[MAXDATELEN + 1];
fsec_t fsec;
-int DateStyle = 1; /* this defaults to ISO_DATES, shall we make
-* it an option? */
+int DateStyle = 1; /* this defaults to USE_ISO_DATES, shall we
+* make it an option? */
if (TIMESTAMP_NOT_FINITE(tstamp))
EncodeSpecialTimestamp(tstamp, buf);


@@ -3691,7 +3691,7 @@ plpgsql_sql_error_callback(void *arg)
internalerrposition(myerrpos + errpos - cbarg->leaderlen - 1);
}
-/* In any case, flush errposition --- we want internalerrpos only */
+/* In any case, flush errposition --- we want internalerrposition only */
errposition(0);
}


@@ -1634,7 +1634,7 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid,
/************************************************************
* prefix procedure body with
-* upvar #0 <internal_procname> GD
+* upvar #0 <internal_proname> GD
* and with appropriate setting of arguments
************************************************************/
Tcl_DStringAppend(&proc_internal_body, "upvar #0 ", -1);


@@ -51,7 +51,7 @@ my $xid = $stdout;
chomp($xid);
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
-'in progress', 'own xid is in-progres');
+'in progress', 'own xid is in-progress');
# Crash and restart the postmaster
$node->stop('immediate');