Fix various typos and incorrect/outdated name references

Author: Alexander Lakhin
Discussion: https://postgr.es/m/699beab4-a6ca-92c9-f152-f559caf6dc25@gmail.com
David Rowley 2023-04-19 13:50:33 +12:00
parent fcb21b3acd
commit 3f58a4e296
28 changed files with 35 additions and 35 deletions

View File

@@ -49,7 +49,7 @@ $node->command_fails_like(
qr/shell command for backup is not configured/,
'fails if basebackup_to_shell.command is not set');
-# Configure basebackup_to_shell.command and reload the configuation file.
+# Configure basebackup_to_shell.command and reload the configuration file.
my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path;
$escaped_backup_path =~ s{\\}{\\\\}g

View File

@@ -85,7 +85,7 @@ GetCurrentLSN(void)
}
/*
-* Intialize WAL reader and identify first valid LSN.
+* Initialize WAL reader and identify first valid LSN.
*/
static XLogReaderState *
InitXLogReaderState(XLogRecPtr lsn)

View File

@@ -376,7 +376,7 @@ InitializeParallelDSM(ParallelContext *pcxt)
/*
* Serialize the transaction snapshot if the transaction
-* isolation-level uses a transaction snapshot.
+* isolation level uses a transaction snapshot.
*/
if (IsolationUsesXactSnapshot())
{

View File

@@ -5835,7 +5835,7 @@ ReachedEndOfBackup(XLogRecPtr EndRecPtr, TimeLineID tli)
{
/*
* We have reached the end of base backup, as indicated by pg_control. The
-* data on disk is now consistent (unless minRecovery point is further
+* data on disk is now consistent (unless minRecoveryPoint is further
* ahead, which can happen if we crashed during previous recovery). Reset
* backupStartPoint and backupEndPoint, and update minRecoveryPoint to
* make sure we don't allow starting up at an earlier point even if

View File

@@ -457,9 +457,9 @@ XLogPrefetcherComputeStats(XLogPrefetcher *prefetcher)
* *lsn, and the I/O will be considered to have completed once that LSN is
* replayed.
*
-* Returns LRQ_NO_IO if we examined the next block reference and found that it
-* was already in the buffer pool, or we decided for various reasons not to
-* prefetch.
+* Returns LRQ_NEXT_NO_IO if we examined the next block reference and found
+* that it was already in the buffer pool, or we decided for various reasons
+* not to prefetch.
*/
static LsnReadQueueNextStatus
XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)

View File

@@ -8,7 +8,7 @@
* or standby mode, depending on configuration options and the state of
* the control file and possible backup label file. PerformWalRecovery()
* performs the actual WAL replay, calling the rmgr-specific redo routines.
-* EndWalRecovery() performs end-of-recovery checks and cleanup actions,
+* FinishWalRecovery() performs end-of-recovery checks and cleanup actions,
* and prepares information needed to initialize the WAL for writes. In
* addition to these three main functions, there are a bunch of functions
* for interrogating recovery state and controlling the recovery process.
@@ -505,7 +505,7 @@ EnableStandbyMode(void)
* disk does after initializing other subsystems, but before calling
* PerformWalRecovery().
*
-* This initializes some global variables like ArchiveModeRequested, and
+* This initializes some global variables like ArchiveRecoveryRequested, and
* StandbyModeRequested and InRecovery.
*/
void
@@ -1396,11 +1396,11 @@ read_tablespace_map(List **tablespaces)
*
* This does not close the 'xlogreader' yet, because in some cases the caller
* still wants to re-read the last checkpoint record by calling
-* ReadCheckPointRecord().
+* ReadCheckpointRecord().
*
* Returns the position of the last valid or applied record, after which new
* WAL should be appended, information about why recovery was ended, and some
-* other things. See the WalRecoveryResult struct for details.
+* other things. See the EndOfWalRecoveryInfo struct for details.
*/
EndOfWalRecoveryInfo *
FinishWalRecovery(void)

View File

@@ -3,7 +3,7 @@
* basebackup_copy.c
* send basebackup archives using COPY OUT
*
-* We send a result set with information about the tabelspaces to be included
+* We send a result set with information about the tablespaces to be included
* in the backup before starting COPY OUT. Then, we start a single COPY OUT
* operation and transmits all the archives and the manifest if present during
* the course of that single COPY OUT. Each CopyData message begins with a

View File

@@ -497,7 +497,7 @@ compare_int16(const void *a, const void *b)
* and a Bitmapset with them; verify that each attribute is appropriate
* to have in a publication column list (no system or generated attributes,
* no duplicates). Additional checks with replica identity are done later;
-* see check_publication_columns.
+* see pub_collist_contains_invalid_column.
*
* Note that the attribute numbers are *not* offset by
* FirstLowInvalidHeapAttributeNumber; system columns are forbidden so this

View File

@@ -8,7 +8,7 @@
*
* In a parallel vacuum, we perform both index bulk deletion and index cleanup
* with parallel worker processes. Individual indexes are processed by one
-* vacuum process. ParalleVacuumState contains shared information as well as
+* vacuum process. ParallelVacuumState contains shared information as well as
* the memory space for storing dead items allocated in the DSM segment. We
* launch parallel worker processes at the start of parallel index
* bulk-deletion and index cleanup and once all indexes are processed, the

View File

@@ -4081,7 +4081,7 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans,
numTransArgs = pertrans->numTransInputs + 1;
/*
-* Set up infrastructure for calling the transfn. Note that invtrans is
+* Set up infrastructure for calling the transfn. Note that invtransfn is
* not needed here.
*/
build_aggregate_transfn_expr(inputTypes,

View File

@@ -740,7 +740,7 @@ ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
*
* Returns RETURNING result if any, otherwise NULL.
* *inserted_tuple is the tuple that's effectively inserted;
-* *inserted_destrel is the relation where it was inserted.
+* *insert_destrel is the relation where it was inserted.
* These are only set on success.
*
* This may change the currently active tuple conversion map in

View File

@@ -432,7 +432,7 @@ AtEOXact_SPI(bool isCommit)
/*
* Pop stack entries, stopping if we find one marked internal_xact (that
-* one belongs to the caller of SPI_commit or SPI_abort).
+* one belongs to the caller of SPI_commit or SPI_rollback).
*/
while (_SPI_connected >= 0)
{

View File

@@ -3152,7 +3152,7 @@ check_hashjoinable(RestrictInfo *restrictinfo)
/*
* check_memoizable
* If the restrictinfo's clause is suitable to be used for a Memoize node,
-* set the lefthasheqoperator and righthasheqoperator to the hash equality
+* set the left_hasheqoperator and right_hasheqoperator to the hash equality
* operator that will be needed during caching.
*/
static void

View File

@@ -1556,7 +1556,7 @@ FetchTableStates(bool *started_tx)
* Does the subscription have tables?
*
* If there were not-READY relations found then we know it does. But
-* if table_state_not_ready was empty we still need to check again to
+* if table_states_not_ready was empty we still need to check again to
* see if there are 0 tables.
*/
has_subrels = (table_states_not_ready != NIL) ||

View File

@@ -2000,7 +2000,7 @@ static inline GlobalVisHorizonKind
GlobalVisHorizonKindForRel(Relation rel)
{
/*
-* Other relkkinds currently don't contain xids, nor always the necessary
+* Other relkinds currently don't contain xids, nor always the necessary
* logical decoding markers.
*/
Assert(!rel ||

View File

@@ -544,7 +544,7 @@ GenerationBlockInit(GenerationContext *context, GenerationBlock *block,
/*
* GenerationBlockIsEmpty
-* Returns true iif 'block' contains no chunks
+* Returns true iff 'block' contains no chunks
*/
static inline bool
GenerationBlockIsEmpty(GenerationBlock *block)

View File

@@ -494,7 +494,7 @@ static void tuplesort_updatemax(Tuplesortstate *state);
* abbreviations of text or multi-key sorts. There could be! Is it worth it?
*/
-/* Used if first key's comparator is ssup_datum_unsigned_compare */
+/* Used if first key's comparator is ssup_datum_unsigned_cmp */
static pg_attribute_always_inline int
qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
{
@@ -517,7 +517,7 @@ qsort_tuple_unsigned_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
}
#if SIZEOF_DATUM >= 8
-/* Used if first key's comparator is ssup_datum_signed_compare */
+/* Used if first key's comparator is ssup_datum_signed_cmp */
static pg_attribute_always_inline int
qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
{
@@ -541,7 +541,7 @@ qsort_tuple_signed_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
}
#endif
-/* Used if first key's comparator is ssup_datum_int32_compare */
+/* Used if first key's comparator is ssup_datum_int32_cmp */
static pg_attribute_always_inline int
qsort_tuple_int32_compare(SortTuple *a, SortTuple *b, Tuplesortstate *state)
{

View File

@@ -1630,7 +1630,7 @@ ThereAreNoPriorRegisteredSnapshots(void)
}
/*
-* HaveRegisteredOrActiveSnapshots
+* HaveRegisteredOrActiveSnapshot
* Is there any registered or active snapshot?
*
* NB: Unless pushed or active, the cached catalog snapshot will not cause

View File

@@ -150,7 +150,7 @@ bbstreamer_gzip_writer_content(bbstreamer *streamer,
* calling gzclose.
*
* It makes no difference whether we opened the file or the caller did it,
-* because libz provides no way of avoiding a close on the underling file
+* because libz provides no way of avoiding a close on the underlying file
* handle. Notice, however, that bbstreamer_gzip_writer_new() uses dup() to
* work around this issue, so that the behavior from the caller's viewpoint
* is the same as for bbstreamer_plain_writer.

View File

@@ -9636,7 +9636,7 @@ getAdditionalACLs(Archive *fout)
{
if (dobj->objType == DO_TABLE)
{
-/* For a column initpriv, set the table's ACL flags */
+/* For a column initprivs, set the table's ACL flags */
dobj->components |= DUMP_COMPONENT_ACL;
((TableInfo *) dobj)->hascolumnACLs = true;
}

View File

@@ -102,7 +102,7 @@ typedef struct PgStatShared_HashEntry
} PgStatShared_HashEntry;
/*
-* Common header struct for PgStatShm_Stat*Entry.
+* Common header struct for PgStatShared_*.
*/
typedef struct PgStatShared_Common
{

View File

@@ -372,7 +372,7 @@ ApplySortAbbrevFullComparator(Datum datum1, bool isNull1,
/*
* Datum comparison functions that we have specialized sort routines for.
-* Datatypes that install these as their comparator or abbrevated comparator
+* Datatypes that install these as their comparator or abbreviated comparator
* are eligible for faster sorting.
*/
extern int ssup_datum_unsigned_cmp(Datum x, Datum y, SortSupport ssup);

View File

@@ -74,7 +74,7 @@ PQsetTraceFlags(PGconn *conn, int flags)
/*
* Print the current time, with microseconds, into a caller-supplied
* buffer.
-* Cribbed from setup_formatted_log_time, but much simpler.
+* Cribbed from get_formatted_log_time, but much simpler.
*/
static void
pqTraceFormatTimestamp(char *timestr, size_t ts_len)

View File

@@ -306,7 +306,7 @@ plpython3_inline_handler(PG_FUNCTION_ARGS)
/*
* Setup error traceback support for ereport().
* plpython_inline_error_callback doesn't currently need exec_ctx, but
-* for consistency with plpython_call_handler we do it the same way.
+* for consistency with plpython3_call_handler we do it the same way.
*/
plerrcontext.callback = plpython_inline_error_callback;
plerrcontext.arg = exec_ctx;

View File

@@ -564,7 +564,7 @@ is($result, qq(), 'inserts into tab4_1 replicated');
# now switch the order of publications in the list, try again, the result
-# should be the same (no dependence on order of pulications)
+# should be the same (no dependence on order of publications)
$node_subscriber2->safe_psql('postgres',
"ALTER SUBSCRIPTION sub2 SET PUBLICATION pub_all, pub_lower_level");

View File

@@ -264,7 +264,7 @@ sub test_streaming
is($result, qq(1), 'transaction is prepared on subscriber');
# Insert a different record (now we are outside of the 2PC transaction)
-# Note: the 2PC transaction still holds row locks so make sure this insert is for a separare primary key
+# Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
$node_publisher->safe_psql('postgres',
"INSERT INTO test_tab VALUES (99999, 'foobar')");

View File

@@ -13,7 +13,7 @@ my $offset = 0;
# Test skipping the transaction. This function must be called after the caller
# has inserted data that conflicts with the subscriber. The finish LSN of the
# error transaction that is used to specify to ALTER SUBSCRIPTION ... SKIP is
-# fetched from the server logs. After executing ALTER SUBSCRITPION ... SKIP, we
+# fetched from the server logs. After executing ALTER SUBSCRIPTION ... SKIP, we
# check if logical replication can continue working by inserting $nonconflict_data
# on the publisher.
sub test_skip_lsn

View File

@@ -6,7 +6,7 @@
# Perl script that tries to add PGDLLIMPORT markings to PostgreSQL
# header files.
#
-# This relies on a few idiosyncracies of the PostgreSQL coding style,
+# This relies on a few idiosyncrasies of the PostgreSQL coding style,
# such as the fact that we always use "extern" in function
# declarations, and that we don't use // comments. It's not very
# smart and may not catch all cases.