Fix a boatload of typos in C comments.

Justin Pryzby

Discussion: https://postgr.es/m/20180331105640.GK28454@telsasoft.com
Tom Lane 2018-04-01 15:01:28 -04:00
parent 686d399f2b
commit 0b11a674fb
29 changed files with 32 additions and 32 deletions

@@ -163,7 +163,7 @@ gen_tabs(void)
q;
/* log and power tables for GF(2**8) finite field with */
/* 0x11b as modular polynomial - the simplest prmitive */
/* 0x11b as modular polynomial - the simplest primitive */
/* root is 0x11, used here to generate the tables */
for (i = 0, p = 1; i < 256; ++i)
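
As an aside on the comment being fixed here: the log/power tables it mentions are built by repeated multiplication in GF(2^8) reduced by the 0x11b polynomial. A minimal standalone sketch of that standard construction (using 3 as the generator for illustration; table and variable names are not pgcrypto's):

#include <stdint.h>

static uint8_t pow_tab[256];
static uint8_t log_tab[256];

/* Build exponent/log tables for GF(2^8) with modular polynomial 0x11b. */
static void
build_gf256_tables(void)
{
    unsigned int p = 1;

    for (int i = 0; i < 256; i++)
    {
        pow_tab[i] = (uint8_t) p;
        log_tab[p] = (uint8_t) i;

        /* multiply p by the generator 3: (p * 2) XOR p, reduced mod 0x11b */
        p = p ^ (p << 1) ^ ((p & 0x80) ? 0x11b : 0);
    }
}

With these tables, a product a*b of nonzero field elements can be computed as pow_tab[(log_tab[a] + log_tab[b]) % 255].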

@@ -60,7 +60,7 @@ InitializeSession(void)
* Initialize the per-session DSM segment if it isn't already initialized, and
* return its handle so that worker processes can attach to it.
*
* Unlike the per-context DSM segment, this segement and its contents are
* Unlike the per-context DSM segment, this segment and its contents are
* reused for future parallel queries.
*
* Return DSM_HANDLE_INVALID if a segment can't be allocated due to lack of

@@ -187,9 +187,9 @@ top:
_bt_relbuf(rel, buf);
/*
* Something did not workout. Just forget about the cached
* Something did not work out. Just forget about the cached
* block and follow the normal path. It might be set again if
* the conditions are favourble.
* the conditions are favourable.
*/
RelationSetTargetBlock(rel, InvalidBlockNumber);
}
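
For context, this hunk sits in btree's cached-target-block ("fastpath") logic: try the remembered block, and if anything about it no longer holds, clear the hint and take the normal path. A hedged toy sketch of that general pattern in plain C (none of these names are nbtree's):

#include <stdio.h>

#define INVALID_BLOCK ((unsigned int) ~0u)

static unsigned int cached_block = INVALID_BLOCK;

/* Stand-ins for "is the cached block still usable?" and the normal search. */
static int block_still_usable(unsigned int blkno) { return blkno != 0; }
static unsigned int find_block_slow_way(void) { return 42; }

static unsigned int
choose_target_block(void)
{
    if (cached_block != INVALID_BLOCK)
    {
        if (block_still_usable(cached_block))
            return cached_block;    /* fast path */

        /*
         * Something did not work out.  Forget the cached block and follow
         * the normal path; it may be cached again if conditions are
         * favourable.
         */
        cached_block = INVALID_BLOCK;
    }

    cached_block = find_block_slow_way();
    return cached_block;
}

int
main(void)
{
    printf("target block: %u\n", choose_target_block());
    return 0;
}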

@@ -409,7 +409,7 @@ ExecSetExecProcNode(PlanState *node, ExecProcNodeMtd function)
* Add a wrapper around the ExecProcNode callback that checks stack depth
* during the first execution and maybe adds an instrumentation
* wrapper. When the callback is changed after execution has already begun
* that means we'll superflously execute ExecProcNodeFirst, but that seems
* that means we'll superfluously execute ExecProcNodeFirst, but that seems
* ok.
*/
node->ExecProcNodeReal = function;
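
The comment above describes a self-replacing first-call wrapper: the callback initially points at a checking/instrumenting wrapper, which installs the real function after the first execution. A hedged standalone sketch of that pattern (the struct and names are illustrative, not the executor's PlanState):

#include <stdio.h>

struct Node;
typedef long (*ProcFunc) (struct Node *node);

typedef struct Node
{
    ProcFunc    proc;           /* what callers invoke */
    ProcFunc    proc_real;      /* the real work function */
} Node;

static long
real_work(struct Node *node)
{
    return 123;
}

/* Runs only on the first call: do one-time checks, then step aside. */
static long
proc_first(struct Node *node)
{
    /* one-time work, e.g. a stack-depth check or adding instrumentation */
    node->proc = node->proc_real;
    return node->proc(node);
}

static void
set_proc(Node *node, ProcFunc function)
{
    node->proc_real = function;
    node->proc = proc_first;
}

int
main(void)
{
    Node        n;

    set_proc(&n, real_work);
    printf("%ld %ld\n", n.proc(&n), n.proc(&n));    /* wrapper runs only once */
    return 0;
}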

@@ -1768,7 +1768,7 @@ llvm_compile_expr(ExprState *state)
b_compare_result,
b_null);
/* build block analying the !NULL comparator result */
/* build block analyzing the !NULL comparator result */
LLVMPositionBuilderAtEnd(b, b_compare_result);
/* if results equal, compare next, otherwise done */

@@ -92,7 +92,7 @@ print_gen(FILE *fp, Pool *pool, int generation)
{
int lowest;
/* Get index to lowest ranking gene in poplulation. */
/* Get index to lowest ranking gene in population. */
/* Use 2nd to last since last is buffer. */
lowest = pool->size > 1 ? pool->size - 2 : 0;

@@ -6709,7 +6709,7 @@ create_partial_grouping_paths(PlannerInfo *root,
* Gather Merge.
*
* NB: This function shouldn't be used for anything other than a grouped or
* partially grouped relation not only because of the fact that it explcitly
* partially grouped relation not only because of the fact that it explicitly
* references group_pathkeys but we pass "true" as the third argument to
* generate_gather_paths().
*/
@@ -6841,7 +6841,7 @@ apply_scanjoin_target_to_paths(PlannerInfo *root,
*/
rel->reltarget = llast_node(PathTarget, scanjoin_targets);
/* Special case: handly dummy relations separately. */
/* Special case: handle dummy relations separately. */
if (is_dummy_rel)
{
/*

@@ -710,7 +710,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* using the parent relation as reference. It must not do anything that
* will not be correct when transposed to the child relation(s). (Step 4
* is incorrect by this light, since child relations might have different
* colun ordering, but the planner will fix things by re-sorting the tlist
* column ordering, but the planner will fix things by re-sorting the tlist
* for each child.)
*/
static List *

@@ -374,7 +374,7 @@ on_shmem_exit(pg_on_exit_callback function, Datum arg)
/* ----------------------------------------------------------------
* cancel_before_shmem_exit
*
* this function removes a previously-registed before_shmem_exit
* this function removes a previously-registered before_shmem_exit
* callback. For simplicity, only the latest entry can be
* removed. (We could work harder but there is no need for
* current uses.)
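
Because only the most recent before_shmem_exit entry can be removed, callers pair registration and cancellation in strict LIFO order around the code that needs the guarantee. A hedged usage sketch (the callback and its argument are hypothetical; the two registration functions are the ones declared in storage/ipc.h):

#include "postgres.h"
#include "storage/ipc.h"

/* hypothetical cleanup routine, using the pg_on_exit_callback signature */
static void
my_cleanup(int code, Datum arg)
{
    /* release whatever 'arg' points to */
}

static void
do_guarded_work(void *resource)
{
    /* make sure cleanup happens if the backend exits while we're at it */
    before_shmem_exit(my_cleanup, PointerGetDatum(resource));

    /* ... work ... */

    /*
     * Done: cancel the callback.  It must still be the latest-registered
     * before_shmem_exit entry, hence the strict pairing.
     */
    cancel_before_shmem_exit(my_cleanup, PointerGetDatum(resource));
}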

@@ -1172,7 +1172,7 @@ get_object_field_end(void *state, char *fname, bool isnull)
if (get_last && _state->result_start != NULL)
{
/*
* make a text object from the string from the prevously noted json
* make a text object from the string from the previously noted json
* start up to the end of the previous token (the lexer is by now
* ahead of us on whatever came after what we're interested in).
*/

@@ -912,7 +912,7 @@ ascii(PG_FUNCTION_ARGS)
*
* Returns the character having the binary equivalent to val.
*
* For UTF8 we treat the argumwent as a Unicode code point.
* For UTF8 we treat the argument as a Unicode code point.
* For other multi-byte encodings we raise an error for arguments
* outside the strict ASCII range (1..127).
*
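
To make "treat the argument as a Unicode code point" concrete: in a UTF8 database chr(n) must expand the code point into a multi-byte sequence. A hedged standalone sketch of that encoding step (just the standard UTF-8 rules, not the server's internal routine):

#include <stdio.h>

/* Encode a code point as UTF-8; returns the byte count, or 0 if out of range. */
static int
encode_utf8(unsigned int cp, unsigned char *out)
{
    if (cp < 0x80)
    {
        out[0] = (unsigned char) cp;
        return 1;
    }
    if (cp < 0x800)
    {
        out[0] = (unsigned char) (0xC0 | (cp >> 6));
        out[1] = (unsigned char) (0x80 | (cp & 0x3F));
        return 2;
    }
    if (cp < 0x10000)
    {
        out[0] = (unsigned char) (0xE0 | (cp >> 12));
        out[1] = (unsigned char) (0x80 | ((cp >> 6) & 0x3F));
        out[2] = (unsigned char) (0x80 | (cp & 0x3F));
        return 3;
    }
    if (cp <= 0x10FFFF)
    {
        out[0] = (unsigned char) (0xF0 | (cp >> 18));
        out[1] = (unsigned char) (0x80 | ((cp >> 12) & 0x3F));
        out[2] = (unsigned char) (0x80 | ((cp >> 6) & 0x3F));
        out[3] = (unsigned char) (0x80 | (cp & 0x3F));
        return 4;
    }
    return 0;
}

int
main(void)
{
    unsigned char buf[4];
    int         n = encode_utf8(0x20AC, buf);   /* U+20AC -> E2 82 AC */

    for (int i = 0; i < n; i++)
        printf("%02X ", buf[i]);
    printf("\n");
    return 0;
}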

@@ -649,7 +649,7 @@ dsa_pin_mapping(dsa_area *area)
* will result in an ERROR.
*
* DSA_ALLOC_NO_OOM causes this function to return InvalidDsaPointer when
* no memory is available or a size limit establed by set_dsa_size_limit
* no memory is available or a size limit established by set_dsa_size_limit
* would be exceeded. Otherwise, such allocations will result in an ERROR.
*
* DSA_ALLOC_ZERO causes the allocated memory to be zeroed. Otherwise, the
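
A short, hedged sketch of how a caller typically combines these flags (backend-only code; the wrapper function is made up for illustration):

#include "postgres.h"
#include "utils/dsa.h"

/* Try to get zeroed shared memory without risking an ERROR on OOM. */
static void *
try_alloc_shared(dsa_area *area, size_t size)
{
    dsa_pointer dp = dsa_allocate_extended(area, size,
                                           DSA_ALLOC_NO_OOM | DSA_ALLOC_ZERO);

    if (!DsaPointerIsValid(dp))
        return NULL;            /* out of memory, or the size limit was hit */

    return dsa_get_address(area, dp);
}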

@@ -386,7 +386,7 @@ sts_puttuple(SharedTuplestoreAccessor *accessor, void *meta_data,
sts_flush_chunk(accessor);
/*
* How many oveflow chunks to go? This will allow readers to
* How many overflow chunks to go? This will allow readers to
* skip all of them at once instead of reading each one.
*/
accessor->write_chunk->overflow = (size + STS_CHUNK_DATA_SIZE - 1) /

@@ -121,7 +121,7 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested)
int len = 0;
/*
* we normally don't want to send superfluous feedbacks, but if it's
* we normally don't want to send superfluous feedback, but if it's
* because of a timeout we need to, otherwise wal_sender_timeout will kill
* us.
*/

@@ -811,7 +811,7 @@ main(int argc, char **argv)
/*
* In binary-upgrade mode, we do not have to worry about the actual blob
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectivly.
* pg_largeobject_metadata tables, respectively.
*
* However, we do need to collect blob information as there may be
* comments or other information on blobs that we do need to dump out.

@@ -17,7 +17,7 @@
* too much time if the crosstab to generate happens to be unreasonably large
* (worst case: a NxN cartesian product with N=number of tuples).
* The value of 1600 corresponds to the maximum columns per table in storage,
* but it could be as much as INT_MAX theorically.
* but it could be as much as INT_MAX theoretically.
*/
#define CROSSTABVIEW_MAX_COLUMNS 1600

@@ -239,7 +239,7 @@ typedef HashScanOpaqueData *HashScanOpaque;
#define HASH_SPLITPOINT_PHASE_MASK (HASH_SPLITPOINT_PHASES_PER_GRP - 1)
#define HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE 10
/* defines max number of splitpoit phases a hash index can have */
/* defines max number of splitpoint phases a hash index can have */
#define HASH_MAX_SPLITPOINT_GROUP 32
#define HASH_MAX_SPLITPOINTS \
(((HASH_MAX_SPLITPOINT_GROUP - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) * \

@@ -190,7 +190,7 @@ typedef struct ParallelHashJoinBatch
/*
* Each backend requires a small amount of per-batch state to interact with
* each ParalellHashJoinBatch.
* each ParallelHashJoinBatch.
*/
typedef struct ParallelHashJoinBatchAccessor
{
@@ -201,7 +201,7 @@ typedef struct ParallelHashJoinBatchAccessor
size_t ntuples; /* number of tuples */
size_t size; /* size of partition in memory */
size_t estimated_size; /* size of partition on disk */
size_t old_ntuples; /* how many tuples before repartioning? */
size_t old_ntuples; /* how many tuples before repartitioning? */
bool at_least_one_chunk; /* has this backend allocated a chunk? */
bool done; /* flag to remember that a batch is done */

@@ -104,7 +104,7 @@ typedef struct AggStatePerTransData
/*
* Comparators for input columns --- only set/used when aggregate has
* DISTINCT flag. equalfnOne version is used for single-column
* commparisons, equalfnMulti for the case of multiple columns.
* comparisons, equalfnMulti for the case of multiple columns.
*/
FmgrInfo equalfnOne;
ExprState *equalfnMulti;

@@ -47,7 +47,7 @@ struct TableFuncScanState;
*
* DestroyBuilder shall release all resources associated with a table builder
* context. It may be called either because all rows have been consumed, or
* because an error ocurred while processing the table expression.
* because an error occurred while processing the table expression.
*/
typedef struct TableFuncRoutine
{

@@ -15,7 +15,7 @@
#include "utils/resowner.h"
/* Flags deterimining what kind of JIT operations to perform */
/* Flags determining what kind of JIT operations to perform */
#define PGJIT_NONE 0
#define PGJIT_PERFORM 1 << 0
#define PGJIT_OPT3 1 << 1
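
These are single-bit flags, so callers OR them together and test with a mask. A trivial hedged example (the helper function is hypothetical):

#include "postgres.h"
#include "jit/jit.h"

static bool
jit_with_opt3_requested(int flags)
{
    /* each PGJIT_* value is one bit, so masking tests each option */
    return (flags & PGJIT_PERFORM) != 0 && (flags & PGJIT_OPT3) != 0;
}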

@@ -107,7 +107,7 @@ extern void llvm_inline(LLVMModuleRef mod);
/*
****************************************************************************
* Code ceneration functions.
* Code generation functions.
****************************************************************************
*/
extern bool llvm_compile_expr(struct ExprState *state);

@@ -42,7 +42,7 @@ typedef dshash_hash (*dshash_hash_function) (const void *v, size_t size,
* Compare and hash functions must be supplied even when attaching, because we
* can't safely share function pointers between backends in general. Either
* the arg variants or the non-arg variants should be supplied; the other
* function pointers should be NULL. If the arg varants are supplied then the
* function pointers should be NULL. If the arg variants are supplied then the
* user data pointer supplied to the create and attach functions will be
* passed to the hash and compare functions.
*/

@@ -410,7 +410,7 @@ extern const pg_wchar_tbl pg_wchar_table[];
* points to a lookup table for the second byte. And so on.
*
* Physically, all the trees are stored in one big array, in 'chars16' or
* 'chars32', depending on the maximum value that needs to be reprented. For
* 'chars32', depending on the maximum value that needs to be represented. For
* each level in each tree, we also store lower and upper bound of allowed
* values - values outside those bounds are considered invalid, and are left
* out of the tables.
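
A toy illustration of the layout the comment describes, i.e. second-level tables packed into one shared array with per-level lower/upper bounds deciding which bytes are valid (this is a deliberately tiny two-byte UTF-8 example, not the real pg_wchar radix tree):

#include <stdint.h>

/* one big array holding the (single) second-level table */
static const uint16_t big_array[] = {
    /* entries for first byte 0xC3, second byte 0xA0..0xA1 */
    0x00E0,                     /* U+00E0 LATIN SMALL LETTER A WITH GRAVE */
    0x00E1,                     /* U+00E1 LATIN SMALL LETTER A WITH ACUTE */
};

static uint32_t
toy_lookup(uint8_t b1, uint8_t b2)
{
    /* level 1: the only valid first byte in this toy is 0xC3 */
    if (b1 != 0xC3)
        return 0;

    /* level 2: lower/upper bounds for this subtree are 0xA0..0xA1 */
    if (b2 < 0xA0 || b2 > 0xA1)
        return 0;

    return big_array[b2 - 0xA0];
}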

@@ -1444,7 +1444,7 @@ typedef JoinPath NestPath;
* that the executor need find only one match per outer tuple, and that the
* mergeclauses are sufficient to identify a match. In such cases the
* executor can immediately advance the outer relation after processing a
* match, and therefoere it need never back up the inner relation.
* match, and therefore it need never back up the inner relation.
*
* materialize_inner is true if a Material node should be placed atop the
* inner input. This may appear with or without an inner Sort step.

@@ -347,7 +347,7 @@ extern int isinf(double x);
/*
* Glibc doesn't use the builtin for clang due to a *gcc* bug in a version
* newer than the gcc compatibility clang claims to have. This would cause a
* *lot* of superflous function calls, therefore revert when using clang.
* *lot* of superfluous function calls, therefore revert when using clang.
*/
#ifdef __clang__
/* needs to be separate to not confuse other compilers */

@@ -471,7 +471,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
memcpy(str, pval, size);
str[varcharsize-1] = '\0';
/* compatiblity mode empty string gets -1 indicator but no warning */
/* compatibility mode empty string gets -1 indicator but no warning */
if (size == 0) {
/* truncation */
switch (ind_type)

@@ -316,7 +316,7 @@ DecodeISO8601Interval(char *str,
* places where DecodeTime is called; and added
* int range = INTERVAL_FULL_RANGE;
*
* * ECPG semes not to have a global IntervalStyle
* * ECPG seems not to have a global IntervalStyle
* so added
* int IntervalStyle = INTSTYLE_POSTGRES;
*/

@@ -84,7 +84,7 @@ PLyUnicode_Bytes(PyObject *unicode)
* function. The result is palloc'ed.
*
* Note that this function is disguised as PyString_AsString() when
* using Python 3. That function retuns a pointer into the internal
* using Python 3. That function returns a pointer into the internal
* memory of the argument, which isn't exactly the interface of this
* function. But in either case you get a rather short-lived
* reference that you ought to better leave alone.