Fix more typos and inconsistencies in the tree

Author: Alexander Lakhin
Discussion: https://postgr.es/m/0a5419ea-1452-a4e6-72ff-545b1a5a8076@gmail.com
Michael Paquier 2019-06-17 16:13:16 +09:00
parent 9d20b0ec8f
commit 3412030205
41 changed files with 56 additions and 56 deletions


@@ -133,7 +133,7 @@ PLyObject_FromJsonbValue(JsonbValue *jsonbValue)
}
/*
- * PLyObject_FromJsonb
+ * PLyObject_FromJsonbContainer
*
* Transform JsonbContainer to PyObject.
*/


@@ -356,7 +356,7 @@ apw_load_buffers(void)
Oid current_db = blkinfo[j].database;
/*
- * Advance the prewarm_stop_idx to the first BlockRecordInfo that does
+ * Advance the prewarm_stop_idx to the first BlockInfoRecord that does
* not belong to this database.
*/
j++;
@@ -365,7 +365,7 @@ apw_load_buffers(void)
if (current_db != blkinfo[j].database)
{
/*
- * Combine BlockRecordInfos for global objects with those of
+ * Combine BlockInfoRecords for global objects with those of
* the database.
*/
if (current_db != InvalidOid)
@@ -378,7 +378,7 @@ apw_load_buffers(void)
/*
* If we reach this point with current_db == InvalidOid, then only
- * BlockRecordInfos belonging to global objects exist. We can't
+ * BlockInfoRecords belonging to global objects exist. We can't
* prewarm without a database connection, so just bail out.
*/
if (current_db == InvalidOid)


@@ -500,7 +500,7 @@ iterate_word_similarity(int *trg2indexes,
word_similarity_threshold;
/*
- * Consider first trigram as initial lower bount for strict word
+ * Consider first trigram as initial lower bound for strict word
* similarity, or initialize it later with first trigram present for plain
* word similarity.
*/


@@ -133,7 +133,7 @@ GetSessionDsmHandle(void)
* If we got this far, we can pin the shared memory so it stays mapped for
* the rest of this backend's life. If we don't make it this far, cleanup
* callbacks for anything we installed above (ie currently
- * SharedRecordTypemodRegistry) will run when the DSM segment is detached
+ * SharedRecordTypmodRegistry) will run when the DSM segment is detached
* by CurrentResourceOwner so we aren't left with a broken CurrentSession.
*/
dsm_pin_mapping(seg);


@@ -340,7 +340,7 @@ hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
/*
* _hash_first and _hash_next handle eliminate dead index entries
- * whenever scan->ignored_killed_tuples is true. Therefore, there's
+ * whenever scan->ignore_killed_tuples is true. Therefore, there's
* nothing to do here except add the results to the TIDBitmap.
*/
tbm_add_tuples(tbm, &(currItem->heapTid), 1, true);


@@ -3364,7 +3364,7 @@ PreventInTransactionBlock(bool isTopLevel, const char *stmtType)
}
/*
- * WarnNoTranactionBlock
+ * WarnNoTransactionBlock
* RequireTransactionBlock
*
* These two functions allow for warnings or errors if a command is executed


@@ -528,7 +528,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
static void
UpdateIndexRelation(Oid indexoid,
Oid heapoid,
- Oid parentIndexOid,
+ Oid parentIndexId,
IndexInfo *indexInfo,
Oid *collationOids,
Oid *classOids,


@@ -319,7 +319,7 @@ get_default_partition_oid(Oid parentId)
/*
* update_default_partition_oid
*
- * Update pg_partition_table.partdefid with a new default partition OID.
+ * Update pg_partitioned_table.partdefid with a new default partition OID.
*/
void
update_default_partition_oid(Oid parentId, Oid defaultPartId)


@@ -829,8 +829,8 @@ CopyLoadRawBuf(CopyState cstate)
* input/output stream. The latter could be either stdin/stdout or a
* socket, depending on whether we're running under Postmaster control.
*
- * Do not allow a Postgres user without the 'pg_access_server_files' role to
- * read from or write to a file.
+ * Do not allow a Postgres user without the 'pg_read_server_files' or
+ * 'pg_write_server_files' role to read from or write to a file.
*
* Do not allow the copy if user doesn't have proper permission to access
* the table or the specifically requested columns.


@@ -170,7 +170,7 @@ compute_return_type(TypeName *returnType, Oid languageOid,
* Input parameters:
* parameters: list of FunctionParameter structs
* languageOid: OID of function language (InvalidOid if it's CREATE AGGREGATE)
- * is_aggregate: needed only to determine error handling
+ * objtype: needed only to determine error handling and required result type
*
* Results are stored into output parameters. parameterTypes must always
* be created, but the other arrays are set to NULL if not needed.


@@ -2876,8 +2876,9 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
/*
* Each EState must have its own es_epqScanDone state, but if we have
- * nested EPQ checks they should share es_epqTuple arrays. This allows
- * sub-rechecks to inherit the values being examined by an outer recheck.
+ * nested EPQ checks they should share es_epqTupleSlot arrays. This
+ * allows sub-rechecks to inherit the values being examined by an outer
+ * recheck.
*/
estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
if (parentestate->es_epqTupleSlot != NULL)


@@ -642,7 +642,7 @@ ExecAssignScanType(ScanState *scanstate, TupleDesc tupDesc)
}
/* ----------------
- * ExecCreateSlotFromOuterPlan
+ * ExecCreateScanSlotFromOuterPlan
* ----------------
*/
void


@@ -1049,8 +1049,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
/*
* ExecParallelHashIncreaseNumBatches
- * Every participant attached to grow_barrier must run this function
- * when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
+ * Every participant attached to grow_batches_barrier must run this
+ * function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
*/
static void
ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
@@ -1106,7 +1106,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
* The combined work_mem of all participants wasn't
* enough. Therefore one batch per participant would be
* approximately equivalent and would probably also be
- * insufficient. So try two batches per particiant,
+ * insufficient. So try two batches per participant,
* rounded up to a power of two.
*/
new_nbatch = 1 << my_log2(pstate->nparticipants * 2);
@@ -1674,7 +1674,7 @@ ExecHashTableInsert(HashJoinTable hashtable,
}
/*
- * ExecHashTableParallelInsert
+ * ExecParallelHashTableInsert
* insert a tuple into a shared hash table or shared batch tuplestore
*/
void


@@ -297,11 +297,12 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags)
Assert(node->plan.qual == NIL);
/*
- * Create a memory context that ExecMakeFunctionResult can use to evaluate
- * function arguments in. We can't use the per-tuple context for this
- * because it gets reset too often; but we don't want to leak evaluation
- * results into the query-lifespan context either. We use one context for
- * the arguments of all tSRFs, as they have roughly equivalent lifetimes.
+ * Create a memory context that ExecMakeFunctionResultSet can use to
+ * evaluate function arguments in. We can't use the per-tuple context for
+ * this because it gets reset too often; but we don't want to leak
+ * evaluation results into the query-lifespan context either. We use one
+ * context for the arguments of all tSRFs, as they have roughly equivalent
+ * lifetimes.
*/
state->argcontext = AllocSetContextCreate(CurrentMemoryContext,
"tSRF function arguments",


@@ -22,7 +22,7 @@ endif
PGFILEDESC = "llvmjit - JIT using LLVM"
NAME = llvmjit
- # All files in this directy use LLVM.
+ # All files in this directory use LLVM.
CFLAGS += $(LLVM_CFLAGS)
CXXFLAGS += $(LLVM_CXXFLAGS)
override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)


@@ -261,7 +261,7 @@ struct IntegerSet
* Prototypes for internal functions.
*/
static void intset_update_upper(IntegerSet *intset, int level,
- intset_node *new_node, uint64 new_node_item);
+ intset_node *child, uint64 child_key);
static void intset_flush_buffered_values(IntegerSet *intset);
static int intset_binsrch_uint64(uint64 value, uint64 *arr, int arr_elems,


@@ -308,7 +308,7 @@ pq_endmessage(StringInfo buf)
* pq_endmessage_reuse - send the completed message to the frontend
*
* The data buffer is *not* freed, allowing to reuse the buffer with
- * pg_beginmessage_reuse.
+ * pq_beginmessage_reuse.
--------------------------------
*/


@@ -1087,7 +1087,8 @@ StartLogicalReplication(StartReplicationCmd *cmd)
* Create our decoding context, making it start at the previously ack'ed
* position.
*
- * Do this before sending CopyBoth, so that any errors are reported early.
+ * Do this before sending a CopyBothResponse message, so that any errors
+ * are reported early.
*/
logical_decoding_ctx =
CreateDecodingContext(cmd->startpoint, cmd->options, false,


@@ -1315,7 +1315,7 @@ pg_mcv_list_in(PG_FUNCTION_ARGS)
/*
- * pg_mcv_list_out - output routine for type PG_MCV_LIST.
+ * pg_mcv_list_out - output routine for type pg_mcv_list.
*
* MCV lists are serialized into a bytea value, so we simply call byteaout()
* to serialize the value into text. But it'd be nice to serialize that into


@@ -1592,7 +1592,7 @@ OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError)
* If the file is inside the top-level temporary directory, its name should
* begin with PG_TEMP_FILE_PREFIX so that it can be identified as temporary
* and deleted at startup by RemovePgTempFiles(). Alternatively, it can be
- * inside a directory created with PathnameCreateTemporaryDir(), in which case
+ * inside a directory created with PathNameCreateTemporaryDir(), in which case
* the prefix isn't needed.
*/
File


@@ -143,7 +143,7 @@ SharedFileSetOpen(SharedFileSet *fileset, const char *name)
}
/*
- * Delete a file that was created with PathNameCreateShared().
+ * Delete a file that was created with SharedFileSetCreate().
* Return true if the file existed, false if didn't.
*/
bool


@@ -113,7 +113,7 @@ BarrierInit(Barrier *barrier, int participants)
* too and then return. Increments the current phase. The caller must be
* attached.
*
- * While waiting, pg_stat_activity shows a wait_event_class and wait_event
+ * While waiting, pg_stat_activity shows a wait_event_type and wait_event
* controlled by the wait_event_info passed in, which should be a value from
* one of the WaitEventXXX enums defined in pgstat.h.
*


@@ -1238,7 +1238,7 @@ shm_mq_inc_bytes_written(shm_mq *mq, Size n)
/*
* Separate prior reads of mq_ring from the write of mq_bytes_written
* which we're about to do. Pairs with the read barrier found in
- * shm_mq_get_receive_bytes.
+ * shm_mq_receive_bytes.
*/
pg_write_barrier();


@@ -230,7 +230,7 @@ const Oid fmgr_last_builtin_oid = %u;
|, $last_builtin_oid;
- # Create fmgr_builtins_oid_index table.
+ # Create fmgr_builtin_oid_index table.
printf $tfh qq|
const uint16 fmgr_builtin_oid_index[%u] = {
|, $last_builtin_oid + 1;


@@ -53,7 +53,7 @@ typedef struct OkeysState
int sent_count;
} OkeysState;
- /* state for iterate_json_string_values function */
+ /* state for iterate_json_values function */
typedef struct IterateJsonStringValuesState
{
JsonLexContext *lex;


@@ -173,7 +173,7 @@ sts_initialize(SharedTuplestore *sts, int participants,
}
/*
- * Attach to a SharedTupleStore that has been initialized by another backend,
+ * Attach to a SharedTuplestore that has been initialized by another backend,
* so that this backend can read and write tuples.
*/
SharedTuplestoreAccessor *


@@ -327,7 +327,7 @@ flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
/*
* flagInhIndexes -
- * Create AttachIndexInfo objects for partitioned indexes, and add
+ * Create IndexAttachInfo objects for partitioned indexes, and add
* appropriate dependency links.
*/
static void


@@ -22,7 +22,7 @@ typedef uint64 XLogRecPtr;
/*
* Zero is used indicate an invalid pointer. Bootstrap skips the first possible
- * WAL segment, initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG
+ * WAL segment, initializing the first WAL page at WAL segment size, so no XLOG
* record can begin at zero.
*/
#define InvalidXLogRecPtr 0


@@ -30,6 +30,6 @@ extern bool has_partition_attrs(Relation rel, Bitmapset *attnums,
extern Oid get_default_partition_oid(Oid parentId);
extern void update_default_partition_oid(Oid parentId, Oid defaultPartId);
- extern List *get_proposed_default_constraint(List *new_part_constaints);
+ extern List *get_proposed_default_constraint(List *new_part_constraints);
#endif /* PARTITION_H */


@@ -41,8 +41,8 @@ CATALOG(pg_foreign_data_wrapper,2328,ForeignDataWrapperRelationId)
} FormData_pg_foreign_data_wrapper;
/* ----------------
- * Form_pg_fdw corresponds to a pointer to a tuple with
- * the format of pg_fdw relation.
+ * Form_pg_foreign_data_wrapper corresponds to a pointer to a tuple with
+ * the format of pg_foreign_data_wrapper relation.
* ----------------
*/
typedef FormData_pg_foreign_data_wrapper *Form_pg_foreign_data_wrapper;


@@ -68,8 +68,8 @@
* A TupleTableSlot can also be "empty", indicated by flag TTS_FLAG_EMPTY set
* in tts_flags, holding no valid data. This is the only valid state for a
* freshly-created slot that has not yet had a tuple descriptor assigned to
- * it. In this state, TTS_SHOULDFREE should not be set in tts_flag, tts_tuple
- * must be NULL, tts_buffer InvalidBuffer, and tts_nvalid zero.
+ * it. In this state, TTS_SHOULDFREE should not be set in tts_flags, tts_tuple
+ * must be NULL and tts_nvalid zero.
*
* The tupleDescriptor is simply referenced, not copied, by the TupleTableSlot
* code. The caller of ExecSetSlotDescriptor() is responsible for providing
@@ -87,7 +87,7 @@
* the descriptor is provided), or when a descriptor is assigned to the slot;
* they are of length equal to the descriptor's natts.
*
- * The TTS_FLAG_SLOW flag and tts_off are saved state for
+ * The TTS_FLAG_SLOW flag is saved state for
* slot_deform_heap_tuple, and should not be touched by any other code.
*----------
*/


@@ -28,7 +28,5 @@ extern void fix_placeholder_input_needed_levels(PlannerInfo *root);
extern void add_placeholders_to_base_rels(PlannerInfo *root);
extern void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel,
RelOptInfo *outer_rel, RelOptInfo *inner_rel);
- extern void add_placeholders_to_child_joinrel(PlannerInfo *root,
- RelOptInfo *childrel, RelOptInfo *parentrel);
#endif /* PLACEHOLDER_H */


@@ -41,7 +41,7 @@ struct RelOptInfo;
* subsidiary data, such as the FmgrInfos.
* planstate Points to the parent plan node's PlanState when called
* during execution; NULL when called from the planner.
- * exprstates Array of ExprStates, indexed as per PruneCtxStateIdx; one
+ * exprstates Array of ExprStates, indexed as per PruneCxtStateIdx; one
* for each partition key in each pruning step. Allocated if
* planstate is non-NULL, otherwise NULL.
*/


@@ -67,7 +67,7 @@ pg_leftmost_one_pos64(uint64 word)
shift -= 8;
return shift + pg_leftmost_one_pos[(word >> shift) & 255];
- #endif /* HAVE__BUIILTIN_CLZ */
+ #endif /* HAVE__BUILTIN_CLZ */
}
/*


@@ -145,7 +145,7 @@ typedef enum JsonToIndex
jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool
} JsonToIndex;
- /* an action that will be applied to each value in iterate_json(b)_vaues functions */
+ /* an action that will be applied to each value in iterate_json(b)_values functions */
typedef void (*JsonIterateStringValuesAction) (void *state, char *elem_value, int elem_len);
/* an action that will be applied to each value in transform_json(b)_values functions */


@@ -1,7 +1,7 @@
/*-------------------------------------------------------------------------
*
* sharedtuplestore.h
- * Simple mechinism for sharing tuples between backends.
+ * Simple mechanism for sharing tuples between backends.
*
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California


@@ -84,7 +84,7 @@
*/
#ifdef PG_NEED_PERL_XSUB_H
/*
- * On Windows, port_win32.h defines macros for a lot of these same functions.
+ * On Windows, win32_port.h defines macros for a lot of these same functions.
* To avoid compiler warnings when XSUB.h redefines them, #undef our versions.
*/
#ifdef WIN32


@@ -9,7 +9,7 @@
* src/port/pwrite.c
*
* Note that this implementation changes the current file position, unlike
- * the POSIX function, so we use the name pg_write().
+ * the POSIX function, so we use the name pg_pwrite().
*
*-------------------------------------------------------------------------
*/


@@ -67,7 +67,7 @@ step "s2c" { COMMIT; }
# in the new partition should contain the changes made by session s2.
permutation "s1b" "s2b" "s2u1" "s1u" "s2c" "s1c" "s1s"
- # Same as above, except, session s1 is waiting in GetTupleTrigger().
+ # Same as above, except, session s1 is waiting in GetTupleForTrigger().
permutation "s1b" "s2b" "s2ut1" "s1ut" "s2c" "s1c" "s1st" "s1stl"
# Below two cases are similar to the above two; except that the session s1


@@ -2,6 +2,6 @@ test_integerset contains unit tests for testing the integer set implementation
in src/backend/lib/integerset.c.
The tests verify the correctness of the implementation, but they can also be
- used as a micro-benchmark. If you set the 'intset_tests_stats' flag in
+ used as a micro-benchmark. If you set the 'intset_test_stats' flag in
test_integerset.c, the tests will print extra information about execution time
and memory usage.


@@ -11,7 +11,6 @@ our $config = {
# blocksize => 8, # --with-blocksize, 8kB by default
# wal_blocksize => 8, # --with-wal-blocksize, 8kB by default
# wal_segsize => 16, # --with-wal-segsize, 16MB by default
- ldap => 1, # --with-ldap
extraver => undef, # --with-extra-version=<string>
gss => undef, # --with-gssapi=<path>