Pgindent run before 9.1 beta2.

Bruce Momjian 2011-06-09 14:32:50 -04:00
parent adf43b2b36
commit 6560407c7d
92 changed files with 644 additions and 620 deletions


@ -33,12 +33,12 @@ typedef struct
/* Methods */
bool (*f_gt) (const void *, const void *, Oid); /* greater than */
bool (*f_ge) (const void *, const void *, Oid); /* greater equal */
bool (*f_eq) (const void *, const void *, Oid); /* equal */
bool (*f_le) (const void *, const void *, Oid); /* less equal */
bool (*f_lt) (const void *, const void *, Oid); /* less than */
int32 (*f_cmp) (const void *, const void *, Oid); /* compare */
GBT_VARKEY *(*f_l2n) (GBT_VARKEY *); /* convert leaf to node */
} gbtree_vinfo;


@ -533,7 +533,7 @@ usage(void)
"Main intended use as restore_command in recovery.conf:\n"
" restore_command = 'pg_standby [OPTION]... ARCHIVELOCATION %%f %%p %%r'\n"
"e.g.\n"
" restore_command = 'pg_standby /mnt/server/archiverdir %%f %%p %%r'\n");
printf("\nReport bugs to <pgsql-bugs@postgresql.org>.\n");
}


@ -362,7 +362,7 @@ check_new_cluster_is_empty(void)
/* pg_largeobject and its index should be skipped */
if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
pg_log(PG_FATAL, "New cluster database \"%s\" is not empty\n",
new_cluster.dbarr.dbs[dbnum].db_name);
}
}
@ -381,17 +381,18 @@ check_new_cluster_is_empty(void)
static void
check_old_cluster_has_new_cluster_dbs(void)
{
int old_dbnum,
new_dbnum;
for (new_dbnum = 0; new_dbnum < new_cluster.dbarr.ndbs; new_dbnum++)
{
for (old_dbnum = 0; old_dbnum < old_cluster.dbarr.ndbs; old_dbnum++)
if (strcmp(old_cluster.dbarr.dbs[old_dbnum].db_name,
new_cluster.dbarr.dbs[new_dbnum].db_name) == 0)
break;
if (old_dbnum == old_cluster.dbarr.ndbs)
pg_log(PG_FATAL, "New cluster database \"%s\" does not exist in the old cluster\n",
new_cluster.dbarr.dbs[new_dbnum].db_name);
}
}
@ -495,7 +496,7 @@ check_is_super_user(ClusterInfo *cluster)
if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
pg_log(PG_FATAL, "database user \"%s\" is not a superuser\n",
os_info.user);
PQclear(res);


@ -90,10 +90,10 @@ get_control_data(ClusterInfo *cluster, bool live_check)
pg_putenv("LC_TIME", NULL);
pg_putenv("LANG",
#ifndef WIN32
NULL);
#else
/* On Windows the default locale cannot be English, so force it */
"en");
#endif
pg_putenv("LANGUAGE", NULL);
pg_putenv("LC_ALL", NULL);


@ -99,16 +99,17 @@ verify_directories(void)
if (access(".", R_OK | W_OK
#ifndef WIN32
/*
* Do a directory execute check only on Unix because execute permission on
* NTFS means "can execute scripts", which we don't care about. Also, X_OK
* is not defined in the Windows API.
*/
| X_OK
#endif
) != 0)
pg_log(PG_FATAL,
"You must have read and write access in the current directory.\n");
check_bin_dir(&old_cluster);
check_data_dir(old_cluster.pgdata);
@ -132,16 +133,18 @@ check_data_dir(const char *pg_data)
{
char subDirName[MAXPGPATH];
int subdirnum;
/* start check with top-most directory */
const char *requiredSubdirs[] = {"", "base", "global", "pg_clog",
"pg_multixact", "pg_subtrans", "pg_tblspc", "pg_twophase",
"pg_xlog"};
for (subdirnum = 0;
subdirnum < sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]);
++subdirnum)
{
struct stat statBuf;
snprintf(subDirName, sizeof(subDirName), "%s/%s", pg_data,
requiredSubdirs[subdirnum]);
@ -173,8 +176,8 @@ check_bin_dir(ClusterInfo *cluster)
report_status(PG_FATAL, "check for %s failed: %s\n",
cluster->bindir, getErrorText(errno));
else if (!S_ISDIR(statBuf.st_mode))
report_status(PG_FATAL, "%s is not a directory\n",
cluster->bindir);
validate_exec(cluster->bindir, "postgres");
validate_exec(cluster->bindir, "pg_ctl");


@ -158,6 +158,7 @@ parseCommandLine(int argc, char *argv[])
case 'u':
pg_free(os_info.user);
os_info.user = pg_strdup(optarg);
/*
* Push the user name into the environment so pre-9.1
* pg_ctl/libpq uses it.


@ -378,7 +378,7 @@ void *pg_malloc(int size);
void pg_free(void *ptr);
const char *getErrorText(int errNum);
unsigned int str2uint(const char *str);
void pg_putenv(const char *var, const char *val);
/* version.c */


@ -52,8 +52,8 @@ get_db_conn(ClusterInfo *cluster, const char *db_name)
char conn_opts[MAXPGPATH];
snprintf(conn_opts, sizeof(conn_opts),
"dbname = '%s' user = '%s' port = %d", db_name, os_info.user,
cluster->port);
return PQconnectdb(conn_opts);
}
@ -146,16 +146,18 @@ start_postmaster(ClusterInfo *cluster)
PGconn *conn;
bool exit_hook_registered = false;
int pg_ctl_return = 0;
#ifndef WIN32
char *output_filename = log_opts.filename;
#else
/*
* On Win32, we can't send both pg_upgrade output and pg_ctl output to the
* same file because we get the error: "The process cannot access the file
* because it is being used by another process." so we have to send all
* other output to 'nul'.
*/
char *output_filename = DEVNULL;
#endif
if (!exit_hook_registered)
@ -180,13 +182,13 @@ start_postmaster(ClusterInfo *cluster)
"-o \"-p %d %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
cluster->bindir, output_filename, cluster->pgdata, cluster->port,
(cluster->controldata.cat_ver >=
BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
"-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
log_opts.filename);
/*
* Don't throw an error right away, let connecting throw the error because
* it might supply a reason for the failure.
*/
pg_ctl_return = exec_prog(false, "%s", cmd);
@ -196,7 +198,7 @@ start_postmaster(ClusterInfo *cluster)
{
pg_log(PG_REPORT, "\nconnection to database failed: %s\n",
PQerrorMessage(conn));
if (conn)
PQfinish(conn);
pg_log(PG_FATAL, "unable to connect to %s postmaster started with the command: %s\n",
CLUSTER_NAME(cluster), cmd);
@ -206,8 +208,8 @@ start_postmaster(ClusterInfo *cluster)
/* If the connection didn't fail, fail now */
if (pg_ctl_return != 0)
pg_log(PG_FATAL, "pg_ctl failed to start the %s server\n",
CLUSTER_NAME(cluster));
os_info.running_cluster = cluster;
}
@ -218,11 +220,12 @@ stop_postmaster(bool fast)
char cmd[MAXPGPATH];
const char *bindir;
const char *datadir;
#ifndef WIN32
char *output_filename = log_opts.filename;
#else
/* See comment in start_postmaster() about why win32 output is ignored. */
char *output_filename = DEVNULL;
#endif
if (os_info.running_cluster == &old_cluster)
@ -268,17 +271,17 @@ check_pghost_envvar(void)
for (option = start; option->keyword != NULL; option++)
{
if (option->envvar && (strcmp(option->envvar, "PGHOST") == 0 ||
strcmp(option->envvar, "PGHOSTADDR") == 0))
{
const char *value = getenv(option->envvar);
if (value && strlen(value) > 0 &&
/* check for 'local' host values */
(strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
strcmp(value, "::1") != 0 && value[0] != '/'))
pg_log(PG_FATAL,
"libpq environment variable %s has a non-local server value: %s\n",
option->envvar, value);
}
}


@ -281,4 +281,3 @@ pg_putenv(const char *var, const char *val)
#endif
}
}


@ -56,15 +56,15 @@ callConsistentFn(GinState *ginstate, GinScanKey key)
key->recheckCurItem = true;
return DatumGetBool(FunctionCall8Coll(&ginstate->consistentFn[key->attnum - 1],
ginstate->supportCollation[key->attnum - 1],
PointerGetDatum(key->entryRes),
UInt16GetDatum(key->strategy),
key->query,
UInt32GetDatum(key->nuserentries),
PointerGetDatum(key->extra_data),
PointerGetDatum(&key->recheckCurItem),
PointerGetDatum(key->queryValues),
PointerGetDatum(key->queryCategories)));
}
/*
@ -252,7 +252,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
*----------
*/
cmp = DatumGetInt32(FunctionCall4Coll(&btree->ginstate->comparePartialFn[attnum - 1],
btree->ginstate->supportCollation[attnum - 1],
scanEntry->queryKey,
idatum,
UInt16GetDatum(scanEntry->strategy),
@ -1178,10 +1178,10 @@ matchPartialInPendingList(GinState *ginstate, Page page,
*----------
*/
cmp = DatumGetInt32(FunctionCall4Coll(&ginstate->comparePartialFn[entry->attnum - 1],
ginstate->supportCollation[entry->attnum - 1],
entry->queryKey,
datum[off - 1],
UInt16GetDatum(entry->strategy),
PointerGetDatum(entry->extra_data)));
if (cmp == 0)
return true;


@ -306,11 +306,11 @@ ginNewScanKey(IndexScanDesc scan)
/* OK to call the extractQueryFn */
queryValues = (Datum *)
DatumGetPointer(FunctionCall7Coll(&so->ginstate.extractQueryFn[skey->sk_attno - 1],
so->ginstate.supportCollation[skey->sk_attno - 1],
skey->sk_argument,
PointerGetDatum(&nQueryValues),
UInt16GetDatum(skey->sk_strategy),
PointerGetDatum(&partial_matches),
PointerGetDatum(&extra_data),
PointerGetDatum(&nullFlags),
PointerGetDatum(&searchMode)));


@ -94,8 +94,8 @@ initGinState(GinState *state, Relation index)
* type for a noncollatable indexed data type (for instance, hstore
* uses text index entries). If there's no index collation then
* specify default collation in case the support functions need
* collation. This is harmless if the support functions don't care
* about collation, so we just do it unconditionally. (We could
* alternatively call get_typcollation, but that seems like expensive
* overkill --- there aren't going to be any cases where a GIN storage
* type has a nondefault collation.)
@ -293,7 +293,7 @@ ginCompareEntries(GinState *ginstate, OffsetNumber attnum,
/* both not null, so safe to call the compareFn */
return DatumGetInt32(FunctionCall2Coll(&ginstate->compareFn[attnum - 1],
ginstate->supportCollation[attnum - 1],
a, b));
}
@ -400,7 +400,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
nullFlags = NULL; /* in case extractValue doesn't set it */
entries = (Datum *)
DatumGetPointer(FunctionCall3Coll(&ginstate->extractValueFn[attnum - 1],
ginstate->supportCollation[attnum - 1],
value,
PointerGetDatum(nentries),
PointerGetDatum(&nullFlags)));


@ -1399,7 +1399,7 @@ initGISTstate(GISTSTATE *giststate, Relation index)
/*
* If the index column has a specified collation, we should honor that
* while doing comparisons. However, we may have a collatable storage
* type for a noncollatable indexed data type. If there's no index
* collation then specify default collation in case the support
* functions need collation. This is harmless if the support
* functions don't care about collation, so we just do it


@ -448,7 +448,7 @@ gistdentryinit(GISTSTATE *giststate, int nkey, GISTENTRY *e,
gistentryinit(*e, k, r, pg, o, l);
dep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->decompressFn[nkey],
giststate->supportCollation[nkey],
PointerGetDatum(e)));
/* decompressFn may just return the given pointer */
if (dep != e)
@ -475,7 +475,7 @@ gistcentryinit(GISTSTATE *giststate, int nkey,
gistentryinit(*e, k, r, pg, o, l);
cep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[nkey],
giststate->supportCollation[nkey],
PointerGetDatum(e)));
/* compressFn may just return the given pointer */
if (cep != e)


@ -80,7 +80,7 @@
*
* Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
* to check that we don't try to scan or do retail insertions into an index
* that is currently being rebuilt or pending rebuild. This helps to catch
* things that don't work when reindexing system catalogs. The assertion
* doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
* when calling the index AM's ambuild routine, and there is no reason for


@ -738,7 +738,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
compare =
DatumGetInt32(FunctionCall2Coll(&entry->sk_func,
entry->sk_collation,
attrDatum1,
attrDatum2));


@ -635,7 +635,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*result = DatumGetBool(OidFunctionCall2Coll(cmp_proc,
op->sk_collation,
leftarg->sk_argument,
rightarg->sk_argument));
return true;
}
}


@ -6656,15 +6656,15 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("requested recovery stop point is before consistent recovery point")));
}
/*
* Ran off end of WAL before reaching end-of-backup WAL record, or
* minRecoveryPoint. That's usually a bad sign, indicating that you
* tried to recover from an online backup but never called
* pg_stop_backup(), or you didn't archive all the WAL up to that
* point. However, this also happens in crash recovery, if the system
* crashes while an online backup is in progress. We must not treat
* that as an error, or the database will refuse to start up.
*/
if (InArchiveRecovery)
{
@ -6674,7 +6674,7 @@ StartupXLOG(void)
errhint("Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery.")));
else
ereport(FATAL,
(errmsg("WAL ends before consistent recovery point")));
}
}


@ -1773,8 +1773,8 @@ index_build(Relation heapRelation,
* However, when reindexing an existing index, we should do nothing here.
* Any HOT chains that are broken with respect to the index must predate
* the index's original creation, so there is no need to change the
* index's usability horizon. Moreover, we *must not* try to change the
* index's pg_index entry while reindexing pg_index itself, and this
* optimization nicely prevents that.
*/
if (indexInfo->ii_BrokenHotChain && !isreindex)
@ -1824,7 +1824,7 @@ index_build(Relation heapRelation,
/*
* If it's for an exclusion constraint, make a second pass over the heap
* to verify that the constraint is satisfied. We must not do this until
* the index is fully valid. (Broken HOT chains shouldn't matter, though;
* see comments for IndexCheckExclusion.)
*/
@ -2136,8 +2136,8 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* It's a HOT-updated tuple deleted by our own xact.
* We can assume the deletion will commit (else the
* index contents don't matter), so treat the same as
* RECENTLY_DEAD HOT-updated tuples.
*/
indexIt = false;
/* mark the index as unsafe for old snapshots */
@ -2146,9 +2146,9 @@ IndexBuildHeapScan(Relation heapRelation,
else
{
/*
* It's a regular tuple deleted by our own xact. Index
* it but don't check for uniqueness, the same as a
* RECENTLY_DEAD tuple.
*/
indexIt = true;
}
@ -2281,9 +2281,8 @@ IndexCheckExclusion(Relation heapRelation,
/*
* If we are reindexing the target index, mark it as no longer being
* reindexed, to forestall an Assert in index_beginscan when we try to use
* the index for probes. This is OK because the index is now fully valid.
*/
if (ReindexIsCurrentlyProcessingIndex(RelationGetRelid(indexRelation)))
ResetReindexProcessing();
@ -2855,9 +2854,9 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
*
* We can also reset indcheckxmin, because we have now done a
* non-concurrent index build, *except* in the case where index_build
* found some still-broken HOT chains. If it did, we normally leave
* indcheckxmin alone (note that index_build won't have changed it,
* because this is a reindex). But if the index was invalid or not ready
* and there were broken HOT chains, it seems best to force indcheckxmin
* true, because the normal argument that the HOT chains couldn't conflict
* with the index is suspect for an invalid index.
@ -2929,7 +2928,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
* the data in a manner that risks a change in constraint validity.
*
* Returns true if any indexes were rebuilt (including toast table's index
* when relevant). Note that a CommandCounterIncrement will occur after each
* index rebuild.
*/
bool


@ -362,7 +362,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
/*
* RangeVarGetAndCheckCreationNamespace
* As RangeVarGetCreationNamespace, but with a permissions check.
*/
Oid
RangeVarGetAndCheckCreationNamespace(const RangeVar *newRelation)


@ -60,8 +60,8 @@ AlterTableCreateToastTable(Oid relOid, Datum reloptions)
/*
* Grab a DDL-exclusive lock on the target table, since we'll update the
* pg_class tuple. This is redundant for all present users. Tuple
* toasting behaves safely in the face of a concurrent TOAST table add.
*/
rel = heap_open(relOid, ShareUpdateExclusiveLock);
@ -274,13 +274,13 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptio
coloptions[1] = 0;
index_create(toast_rel, toast_idxname, toastIndexOid,
indexInfo,
list_make2("chunk_id", "chunk_seq"),
BTREE_AM_OID,
rel->rd_rel->reltablespace,
collationObjectId, classObjectId, coloptions, (Datum) 0,
true, false, false, false,
true, false, false);
heap_close(toast_rel, NoLock);


@ -566,7 +566,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
}
/*
* Report ANALYZE to the stats collector, too. However, if doing
* inherited stats we shouldn't report, because the stats collector only
* tracks per-table stats.
*/
@ -1231,7 +1231,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
/*
* Estimate total numbers of rows in relation. For live rows, use
* vac_estimate_reltuples; for dead rows, we have no source of old
* information, so we have to assume the density is the same in unseen
* pages as in the pages we scanned.


@ -762,12 +762,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* If the OldHeap has a toast table, get lock on the toast table to keep
* it from being vacuumed. This is needed because autovacuum processes
* toast tables independently of their main tables, with no lock on the
* latter. If an autovacuum were to start on the toast table after we
* compute our OldestXmin below, it would use a later OldestXmin, and then
* possibly remove as DEAD toast tuples belonging to main tuples we think
* are only RECENTLY_DEAD. Then we'd fail while trying to copy those
* tuples.
*
* We don't need to open the toast relation here, just lock it. The lock


@ -185,14 +185,15 @@ DefineIndex(RangeVar *heapRelation,
rel->rd_rel->relkind != RELKIND_UNCATALOGED)
{
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
/*
* Custom error message for FOREIGN TABLE since the term is close
* to a regular table and can confuse the user.
*/
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot create index on foreign table \"%s\"",
heapRelation->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),


@ -1077,12 +1077,12 @@ read_info(SeqTable elm, Relation rel, Buffer *buf)
tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
/*
* Previous releases of Postgres neglected to prevent SELECT FOR UPDATE on
* a sequence, which would leave a non-frozen XID in the sequence tuple's
* xmax, which eventually leads to clog access failures or worse. If we
* see this has happened, clean up after it. We treat this like a hint
* bit update, ie, don't bother to WAL-log it, since we can certainly do
* this again if the update gets lost.
*/
if (HeapTupleHeaderGetXmax(tuple.t_data) != InvalidTransactionId)
{


@ -2679,7 +2679,8 @@ AlterTableGetLockLevel(List *cmds)
* These subcommands affect implicit row type conversion. They
* have affects similar to CREATE/DROP CAST on queries. We
* don't provide for invalidating parse trees as a result of
* such changes. Do avoid concurrent pg_class updates,
* though.
*/
case AT_AddOf:
case AT_DropOf:
@ -2946,7 +2947,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
case AT_DisableRule:
case AT_DropInherit: /* NO INHERIT */
case AT_AddOf: /* OF */
case AT_DropOf: /* NOT OF */
ATSimplePermissions(rel, ATT_TABLE);
/* These commands never recurse */
/* No command-specific prep needed */
@ -4067,7 +4068,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
*
* Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
* isn't suitable, throw an error. Currently, we require that the type
* originated with CREATE TYPE AS. We could support any row type, but doing so
* would require handling a number of extra corner cases in the DDL commands.
*/
void
@ -4083,6 +4084,7 @@ check_of_type(HeapTuple typetuple)
Assert(OidIsValid(typ->typrelid));
typeRelation = relation_open(typ->typrelid, AccessShareLock);
typeOk = (typeRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE);
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
* commit. That will prevent someone else from deleting or ALTERing
@ -7406,8 +7408,8 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock
default:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a table, view, sequence, or foreign table",
NameStr(tuple_class->relname))));
}
/*
@ -8603,7 +8605,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
* Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
* INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
* heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
* be TypeRelationId). There's no convenient way to do this, so go trawling
* through pg_depend.
*/
static void
@ -8730,8 +8732,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
if (strncmp(table_attname, type_attname, NAMEDATALEN) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("table has column \"%s\" where type requires \"%s\"",
table_attname, type_attname)));
/* Compare type. */
if (table_attr->atttypid != type_attr->atttypid ||
@ -8739,8 +8741,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
table_attr->attcollation != type_attr->attcollation)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("table \"%s\" has different type for column \"%s\"",
RelationGetRelationName(rel), type_attname)));
}
DecrTupleDescRefCount(typeTupleDesc);
@ -8748,6 +8750,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
for (; table_attno <= tableTupleDesc->natts; table_attno++)
{
Form_pg_attribute table_attr = tableTupleDesc->attrs[table_attno - 1];
if (!table_attr->attisdropped)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
@ -8785,7 +8788,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
/*
* ALTER TABLE NOT OF
*
* Detach a typed table from its originating type. Just clear reloftype and
* remove the dependency.
*/
static void
@ -8802,8 +8805,8 @@ ATExecDropOf(Relation rel, LOCKMODE lockmode)
RelationGetRelationName(rel))));
/*
* We don't bother to check ownership of the type --- ownership of the
* table is presumed enough rights. No lock required on the type, either.
*/
drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);


@ -96,6 +96,7 @@ get_ts_parser_func(DefElem *defel, int attnum)
break;
case Anum_pg_ts_parser_prslextype:
nargs = 1;
/*
* Note: because the lextype method returns type internal, it must
* have an internal-type argument for security reasons. The


@ -1069,7 +1069,7 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeMod, /* typeMod value */
typNDims, /* Array dimensions for base type */
typNotNull, /* Type NOT NULL */
domaincoll); /* type's collation */
/*
* Process constraints which refer to the domain ID returned by TypeCreate


@ -459,7 +459,7 @@ vacuum_set_xid_limits(int freeze_min_age,
* If we scanned the whole relation then we should just use the count of
* live tuples seen; but if we did not, we should not trust the count
* unreservedly, especially not in VACUUM, which may have scanned a quite
* nonrandom subset of the table. When we have only partial information,
* we take the old value of pg_class.reltuples as a measurement of the
* tuple density in the unscanned pages.
*
@ -471,7 +471,7 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
BlockNumber scanned_pages,
double scanned_tuples)
{
BlockNumber old_rel_pages = relation->rd_rel->relpages;
double old_rel_tuples = relation->rd_rel->reltuples;
double old_density;
double new_density;
@ -483,8 +483,8 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
return scanned_tuples;
/*
* If scanned_pages is zero but total_pages isn't, keep the existing value
* of reltuples.
*/
if (scanned_pages == 0)
return old_rel_tuples;
@ -498,23 +498,23 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
/*
* Okay, we've covered the corner cases. The normal calculation is to
* convert the old measurement to a density (tuples per page), then update
* the density using an exponential-moving-average approach, and finally
* compute reltuples as updated_density * total_pages.
*
* For ANALYZE, the moving average multiplier is just the fraction of the
* table's pages we scanned. This is equivalent to assuming that the
* tuple density in the unscanned pages didn't change. Of course, it
* probably did, if the new density measurement is different. But over
* repeated cycles, the value of reltuples will converge towards the
* correct value, if repeated measurements show the same new density.
*
* For VACUUM, the situation is a bit different: we have looked at a
* nonrandom sample of pages, but we know for certain that the pages we
* didn't look at are precisely the ones that haven't changed lately.
* Thus, there is a reasonable argument for doing exactly the same thing
* as for the ANALYZE case, that is use the old density measurement as the
* value for the unscanned pages.
*
* This logic could probably use further refinement.
*/


@ -86,7 +86,7 @@ typedef struct LVRelStats
/* Overall statistics about rel */
BlockNumber rel_pages; /* total number of pages */
BlockNumber scanned_pages; /* number of pages we examined */
double scanned_tuples; /* counts only tuples on scanned pages */
double old_rel_tuples; /* previous value of pg_class.reltuples */
double new_rel_tuples; /* new estimated total # of tuples */
BlockNumber pages_removed;
@ -211,7 +211,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
vac_update_relstats(onerel,
vacrelstats->rel_pages, vacrelstats->new_rel_tuples,
vacrelstats->hasindex,
(vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
InvalidTransactionId :
FreezeLimit);
@ -341,9 +341,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* of pages.
*
* Before entering the main loop, establish the invariant that
* next_not_all_visible_block is the next block number >= blkno that's not
* all-visible according to the visibility map, or nblocks if there's no
* such block. Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
@ -804,7 +804,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/* now we can compute the new value for pg_class.reltuples */
vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
nblocks,
vacrelstats->scanned_pages,
num_tuples);
/* If any tuples need to be deleted, perform final vacuum cycle */
@ -1082,11 +1082,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
if (new_rel_pages != old_rel_pages)
{
/*
* Note: we intentionally don't update vacrelstats->rel_pages with the
* new rel size here. If we did, it would amount to assuming that the
* new pages are empty, which is unlikely. Leaving the numbers alone
* amounts to assuming that the new pages have the same tuple density
* as existing ones, which is less unlikely.
*/
UnlockRelation(onerel, AccessExclusiveLock);
return;


@ -807,9 +807,9 @@ check_client_encoding(char **newval, void **extra, GucSource source)
*
* XXX Although canonicalizing seems like a good idea in the abstract, it
* breaks pre-9.1 JDBC drivers, which expect that if they send "UNICODE"
* as the client_encoding setting then it will read back the same way. As
* a workaround, don't replace the string if it's "UNICODE". Remove that
* hack when pre-9.1 JDBC drivers are no longer in use.
*/
if (strcmp(*newval, canonical_name) != 0 &&
strcmp(*newval, "UNICODE") != 0)


@ -265,8 +265,8 @@ ExecHashJoin(HashJoinState *node)
/*
* We check for interrupts here because this corresponds to
* where we'd fetch a row from a child plan node in other join
* types.
*/
CHECK_FOR_INTERRUPTS();


@ -929,7 +929,7 @@ pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat)
/* Fetch major status message */
msg_ctx = 0;
gss_display_status(&lmin_s, maj_stat, GSS_C_GSS_CODE,
GSS_C_NO_OID, &msg_ctx, &gmsg);
strlcpy(msg_major, gmsg.value, sizeof(msg_major));
gss_release_buffer(&lmin_s, &gmsg);
@ -945,7 +945,7 @@ pg_GSS_error(int severity, char *errmsg, OM_uint32 maj_stat, OM_uint32 min_stat)
/* Fetch mechanism minor status message */
msg_ctx = 0;
gss_display_status(&lmin_s, min_stat, GSS_C_MECH_CODE,
GSS_C_NO_OID, &msg_ctx, &gmsg);
strlcpy(msg_minor, gmsg.value, sizeof(msg_minor));
gss_release_buffer(&lmin_s, &gmsg);
@ -1761,7 +1761,7 @@ auth_peer(hbaPort *port)
if (errno == ENOSYS)
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("peer authentication is not supported on this platform")));
else
ereport(LOG,
(errcode_for_socket_access(),


@ -494,8 +494,8 @@ check_role(const char *role, Oid roleid, char *param_str)
return true;
}
else if (strcmp(tok, role) == 0 ||
(strcmp(tok, "replication\n") == 0 &&
strcmp(role, "replication") == 0) ||
strcmp(tok, "all\n") == 0)
return true;
}


@ -392,7 +392,7 @@ get_current_username(const char *progname)
/* Allocate new memory because later getpwuid() calls can overwrite it. */
return strdup(pw->pw_name);
#else
unsigned long namesize = 256 /* UNLEN */ + 1;
char *name;
name = malloc(namesize);


@ -74,6 +74,7 @@ geqo(PlannerInfo *root, int number_of_rels, List *initial_rels)
Pool *pool;
int pool_size,
number_generations;
#ifdef GEQO_DEBUG
int status_interval;
#endif


@ -2687,7 +2687,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
* evaluation of AND/OR? Probably *not*, because that would make the
* results depend on the clause ordering, and we are not in any position
* to expect that the current ordering of the clauses is the one that's
* going to end up being used. The above per-RestrictInfo caching would
* not mix well with trying to re-order clauses anyway.
*/
if (IsA(node, FuncExpr))


@ -953,7 +953,7 @@ is_dummy_rel(RelOptInfo *rel)
* dummy.
*
* Also, when called during GEQO join planning, we are in a short-lived
* memory context. We must make sure that the dummy path attached to a
* baserel survives the GEQO cycle, else the baserel is trashed for future
* GEQO cycles. On the other hand, when we are marking a joinrel during GEQO,
* we don't want the dummy path to clutter the main planning context. Upshot


@ -3383,9 +3383,9 @@ add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll, bool nulls_first,
* opposite nulls direction is redundant.
*
* We could probably consider sort keys with the same sortop and
* different collations to be redundant too, but for the moment treat
* them as not redundant. This will be needed if we ever support
* collations with different notions of equality.
*/
if (sortColIdx[i] == colIdx &&
sortOperators[numCols] == sortOp &&
@ -3419,7 +3419,7 @@ add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll, bool nulls_first,
*
* We must convert the pathkey information into arrays of sort key column
* numbers, sort operator OIDs, collation OIDs, and nulls-first flags,
* which is the representation the executor wants. These are returned into
* the output parameters *p_numsortkeys etc.
*
* If the pathkeys include expressions that aren't simple Vars, we will


@ -1034,8 +1034,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
if (parse->hasAggs)
{
/*
* Collect statistics about aggregates for estimating costs. Note:
* we do not attempt to detect duplicate aggregates here; a
* somewhat-overestimated cost is okay for our present purposes.
*/
count_agg_clauses(root, (Node *) tlist, &agg_costs);


@ -933,10 +933,10 @@ generate_setop_tlist(List *colTypes, List *colCollations,
}
/*
* Ensure the tlist entry's exposed collation matches the set-op. This
* is necessary because plan_set_operations() reports the result
* ordering as a list of SortGroupClauses, which don't carry collation
* themselves but just refer to tlist entries. If we don't show the
* right collation then planner.c might do the wrong thing in
* higher-level queries.
*


@ -86,7 +86,7 @@ typedef struct
static bool contain_agg_clause_walker(Node *node, void *context);
static bool pull_agg_clause_walker(Node *node, List **context);
static bool count_agg_clauses_walker(Node *node,
count_agg_clauses_context *context);
static bool find_window_functions_walker(Node *node, WindowFuncLists *lists);
static bool expression_returns_set_rows_walker(Node *node, double *count);
static bool contain_subplans_walker(Node *node, void *context);
@ -2884,9 +2884,9 @@ eval_const_expressions_mutator(Node *node,
/*
* We can remove null constants from the list. For a non-null
* constant, if it has not been preceded by any other
* non-null-constant expressions then it is the result. Otherwise,
* it's the next argument, but we can drop following arguments
* since they will never be reached.
*/
if (IsA(e, Const))
{


@ -575,7 +575,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* We must assign collations now because assign_query_collations
* doesn't process rangetable entries. We just assign all the
* collations independently in each row, and don't worry about
* whether they are consistent vertically. The outer INSERT query
* isn't going to care about the collations of the VALUES columns,
* so it's not worth the effort to identify a common collation for
* each one here. (But note this does have one user-visible
@ -1100,16 +1100,16 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
* doesn't process rangetable entries, and (2) we need to label the VALUES
* RTE with column collations for use in the outer query. We don't
* consider conflict of implicit collations to be an error here; instead
* the column will just show InvalidOid as its collation, and you'll get a
* failure later if that results in failure to resolve a collation.
*
* Note we modify the per-column expression lists in-place.
*/
collations = NIL;
for (i = 0; i < sublist_length; i++)
{
Oid coltype;
Oid colcoll;
coltype = select_common_type(pstate, colexprs[i], "VALUES", NULL);
@ -1210,7 +1210,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("VALUES must not contain table references"),
parser_errposition(pstate,
locate_var_of_level((Node *) exprsLists, 0))));
/*
* Another thing we can't currently support is NEW/OLD references in rules
@ -1225,7 +1225,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
errmsg("VALUES must not contain OLD or NEW references"),
errhint("Use SELECT ... UNION ALL ... instead."),
parser_errposition(pstate,
locate_var_of_level((Node *) exprsLists, 0))));
qry->rtable = pstate->p_rtable;
qry->jointree = makeFromExpr(pstate->p_joinlist, NULL);
@ -1237,13 +1237,13 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("cannot use aggregate function in VALUES"),
parser_errposition(pstate,
locate_agg_of_level((Node *) exprsLists, 0))));
if (pstate->p_hasWindowFuncs)
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
errmsg("cannot use window function in VALUES"),
parser_errposition(pstate,
locate_windowfunc((Node *) exprsLists))));
assign_query_collations(pstate, qry);


@ -167,7 +167,7 @@ coerce_type(ParseState *pstate, Node *node,
*
* These cases are unlike the ones above because the exposed type of
* the argument must be an actual array or enum type. In particular
* the argument must *not* be an UNKNOWN constant. If it is, we just
* fall through; below, we'll call anyarray_in or anyenum_in, which
* will produce an error. Also, if what we have is a domain over
* array or enum, we have to relabel it to its base type.
@ -1290,7 +1290,7 @@ coerce_to_common_type(ParseState *pstate, Node *node,
*
* Domains over arrays match ANYARRAY, and are immediately flattened to their
* base type. (Thus, for example, we will consider it a match if one ANYARRAY
* argument is a domain over int4[] while another one is just int4[].) Also
* notice that such a domain does *not* match ANYNONARRAY.
*
* If we have UNKNOWN input (ie, an untyped literal) for any polymorphic
@ -1444,7 +1444,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
* is an extra restriction if not.)
*
* Domains over arrays match ANYARRAY arguments, and are immediately flattened
* to their base type. (In particular, if the return type is also ANYARRAY,
* we'll set it to the base type not the domain type.)
*
* When allow_poly is false, we are not expecting any of the actual_arg_types


@ -157,7 +157,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
stmt = (CreateStmt *) copyObject(stmt);
/*
* Look up the creation namespace. This also checks permissions on the
* target namespace, so that we throw any permissions error as early as
* possible.
*/
@ -169,7 +169,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
*/
if (stmt->if_not_exists)
{
Oid existing_relid;
existing_relid = get_relname_relid(stmt->relation->relname,
namespaceid);
@ -178,7 +178,7 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
ereport(NOTICE,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" already exists, skipping",
stmt->relation->relname)));
return NIL;
}
}
@ -2544,8 +2544,8 @@ transformColumnType(CreateStmtContext *cxt, ColumnDef *column)
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(ctype);
LookupCollation(cxt->pstate,
column->collClause->collname,
column->collClause->location);
/* Complain if COLLATE is applied to an uncollatable type */
if (!OidIsValid(typtup->typcollation))
ereport(ERROR,


@ -121,7 +121,7 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
"semaphore sets (SEMMNI), or the system wide maximum number of "
"semaphores (SEMMNS), would be exceeded. You need to raise the "
"respective kernel parameter. Alternatively, reduce PostgreSQL's "
"consumption of semaphores by reducing its max_connections parameter.\n"
"The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.") : 0));
}


@ -135,7 +135,7 @@ crashDumpHandler(struct _EXCEPTION_POINTERS * pExceptionInfo)
systemTicks = GetTickCount();
snprintf(dumpPath, _MAX_PATH,
"crashdumps\\postgres-pid%0i-%0i.mdmp",
(int) selfPid, (int) systemTicks);
dumpPath[_MAX_PATH - 1] = '\0';


@ -373,7 +373,7 @@ pgwin32_recv(SOCKET s, char *buf, int len, int f)
* The second argument to send() is defined by SUS to be a "const void *"
* and so we use the same signature here to keep compilers happy when
* handling callers.
*
* But the buf member of a WSABUF struct is defined as "char *", so we cast
* the second argument to that here when assigning it, also to keep compilers
* happy.


@ -94,7 +94,7 @@ WaitLatchOrSocket(volatile Latch *latch, SOCKET sock, bool forRead,
DWORD rc;
HANDLE events[3];
HANDLE latchevent;
HANDLE sockevent = WSA_INVALID_EVENT; /* silence compiler */
int numevents;
int result = 0;


@ -1486,10 +1486,10 @@ ServerLoop(void)
WalWriterPID = StartWalWriter();
/*
* If we have lost the autovacuum launcher, try to start a new one. We
* don't want autovacuum to run in binary upgrade mode because
* autovacuum might update relfrozenxid for empty tables before the
* physical files are put in place.
*/
if (!IsBinaryUpgrade && AutoVacPID == 0 &&
(AutoVacuumingActive() || start_autovac_launcher) &&


@ -23,7 +23,7 @@
* several implementation strategies depending on the situation:
*
* 1. In C/POSIX collations, we use hard-wired code. We can't depend on
* the <ctype.h> functions since those will obey LC_CTYPE. Note that these
* collations don't give a fig about multibyte characters.
*
* 2. In the "default" collation (which is supposed to obey LC_CTYPE):
@ -35,10 +35,10 @@
*
* 2b. In all other encodings, or on machines that lack <wctype.h>, we use
* the <ctype.h> functions for pg_wchar values up to 255, and punt for values
* above that. This is only 100% correct in single-byte encodings such as
* LATINn. However, non-Unicode multibyte encodings are mostly Far Eastern
* character sets for which the properties being tested here aren't very
* relevant for higher code values anyway. The difficulty with using the
* <wctype.h> functions with non-Unicode multibyte encodings is that we can
* have no certainty that the platform's wchar_t representation matches
* what we do in pg_wchar conversions.
@ -87,134 +87,134 @@ static pg_locale_t pg_regex_locale;
#define PG_ISSPACE 0x80
static const unsigned char pg_char_properties[128] = {
/* NUL */ 0,
/* ^A */ 0,
/* ^B */ 0,
/* ^C */ 0,
/* ^D */ 0,
/* ^E */ 0,
/* ^F */ 0,
/* ^G */ 0,
/* ^H */ 0,
/* ^I */ PG_ISSPACE,
/* ^J */ PG_ISSPACE,
/* ^K */ PG_ISSPACE,
/* ^L */ PG_ISSPACE,
/* ^M */ PG_ISSPACE,
/* ^N */ 0,
/* ^O */ 0,
/* ^P */ 0,
/* ^Q */ 0,
/* ^R */ 0,
/* ^S */ 0,
/* ^T */ 0,
/* ^U */ 0,
/* ^V */ 0,
/* ^W */ 0,
/* ^X */ 0,
/* ^Y */ 0,
/* ^Z */ 0,
/* ^[ */ 0,
/* ^\ */ 0,
/* ^] */ 0,
/* ^^ */ 0,
/* ^_ */ 0,
/* */ PG_ISPRINT | PG_ISSPACE,
/* ! */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* " */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* # */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* $ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* % */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* & */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ' */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ( */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ) */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* * */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* + */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* , */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* - */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* . */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* / */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* 0 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 1 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 2 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 3 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 4 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 5 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 6 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 7 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 8 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* 9 */ PG_ISDIGIT | PG_ISGRAPH | PG_ISPRINT,
/* : */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ; */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* < */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* = */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* > */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ? */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* @ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* A */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* B */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* C */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* D */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* E */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* F */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* G */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* H */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* I */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* J */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* K */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* L */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* M */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* N */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* O */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* P */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* Q */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* R */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* S */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* T */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* U */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* V */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* W */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* X */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* Y */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* Z */ PG_ISALPHA | PG_ISUPPER | PG_ISGRAPH | PG_ISPRINT,
/* [ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* \ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ] */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ^ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* _ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ` */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* a */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* b */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* c */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* d */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* e */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* f */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* g */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* h */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* i */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* j */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* k */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* l */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* m */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* n */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* o */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* p */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* q */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* r */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* s */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* t */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* u */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* v */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* w */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* x */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* y */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* z */ PG_ISALPHA | PG_ISLOWER | PG_ISGRAPH | PG_ISPRINT,
/* { */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* | */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* } */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* ~ */ PG_ISGRAPH | PG_ISPRINT | PG_ISPUNCT,
/* DEL */ 0
};
@ -242,8 +242,8 @@ pg_set_regex_collation(Oid collation)
{
/*
* NB: pg_newlocale_from_collation will fail if not HAVE_LOCALE_T;
* the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does
* not have to be considered below.
* the case of pg_regex_locale != 0 but not HAVE_LOCALE_T does not
* have to be considered below.
*/
pg_regex_locale = pg_newlocale_from_collation(collation);
}

View File

@ -236,8 +236,8 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
/*
* If the postmaster dies, we'll probably never get an
* acknowledgement, because all the wal sender processes will exit.
* So just bail out.
* acknowledgement, because all the wal sender processes will exit. So
* just bail out.
*/
if (!PostmasterIsAlive(true))
{

View File

@ -488,13 +488,13 @@ DefineQueryRewrite(char *rulename,
if (action != NIL || is_instead)
{
InsertRule(rulename,
event_type,
event_relid,
event_attno,
is_instead,
event_qual,
action,
replace);
event_type,
event_relid,
event_attno,
is_instead,
event_qual,
action,
replace);
/*
* Set pg_class 'relhasrules' field TRUE for event relation. If

View File

@ -455,17 +455,17 @@ rewriteRuleAction(Query *parsetree,
}
/*
* If the original query has any CTEs, copy them into the rule action.
* But we don't need them for a utility action.
* If the original query has any CTEs, copy them into the rule action. But
* we don't need them for a utility action.
*/
if (parsetree->cteList != NIL && sub_action->commandType != CMD_UTILITY)
{
ListCell *lc;
/*
* Annoying implementation restriction: because CTEs are identified
* by name within a cteList, we can't merge a CTE from the original
* query if it has the same name as any CTE in the rule action.
* Annoying implementation restriction: because CTEs are identified by
* name within a cteList, we can't merge a CTE from the original query
* if it has the same name as any CTE in the rule action.
*
* This could possibly be fixed by using some sort of internally
* generated ID, instead of names, to link CTE RTEs to their CTEs.
@ -2116,15 +2116,15 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
/*
* If the original query has a CTE list, and we generated more than one
* non-utility result query, we have to fail because we'll have copied
* the CTE list into each result query. That would break the expectation
* of single evaluation of CTEs. This could possibly be fixed by
* non-utility result query, we have to fail because we'll have copied the
* CTE list into each result query. That would break the expectation of
* single evaluation of CTEs. This could possibly be fixed by
* restructuring so that a CTE list can be shared across multiple Query
* and PlannableStatement nodes.
*/
if (parsetree->cteList != NIL)
{
int qcount = 0;
int qcount = 0;
foreach(lc1, rewritten)
{

View File

@ -29,7 +29,7 @@ t_isdigit(const char *ptr)
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isdigit(TOUCHAR(ptr));
@ -45,7 +45,7 @@ t_isspace(const char *ptr)
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isspace(TOUCHAR(ptr));
@ -61,7 +61,7 @@ t_isalpha(const char *ptr)
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isalpha(TOUCHAR(ptr));
@ -77,7 +77,7 @@ t_isprint(const char *ptr)
int clen = pg_mblen(ptr);
wchar_t character[2];
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
if (clen == 1 || lc_ctype_is_c(collation))
return isprint(TOUCHAR(ptr));
@ -250,7 +250,7 @@ lowerstr_with_len(const char *str, int len)
#ifdef USE_WIDE_UPPER_LOWER
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
#endif
if (len == 0)

View File

@ -300,7 +300,7 @@ TParserInit(char *str, int len)
if (prs->charmaxlen > 1)
{
Oid collation = DEFAULT_COLLATION_OID; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
pg_locale_t mylocale = 0; /* TODO */
prs->usewide = true;
if (lc_ctype_is_c(collation))

View File

@ -4049,10 +4049,11 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/* Compatible with postgresql < 8.4 when DateStyle = 'iso' */
case INTSTYLE_POSTGRES:
cp = AddPostgresIntPart(cp, year, "year", &is_zero, &is_before);
/*
* Ideally we should spell out "month" like we do for "year"
* and "day". However, for backward compatibility, we can't
* easily fix this. bjm 2011-05-24
* Ideally we should spell out "month" like we do for "year" and
* "day". However, for backward compatibility, we can't easily
* fix this. bjm 2011-05-24
*/
cp = AddPostgresIntPart(cp, mon, "mon", &is_zero, &is_before);
cp = AddPostgresIntPart(cp, mday, "day", &is_zero, &is_before);

View File

@ -564,9 +564,9 @@ strftime_win32(char *dst, size_t dstlen, const wchar_t *format, const struct tm
dst[len] = '\0';
if (encoding != PG_UTF8)
{
char *convstr =
(char *) pg_do_encoding_conversion((unsigned char *) dst,
len, PG_UTF8, encoding);
char *convstr =
(char *) pg_do_encoding_conversion((unsigned char *) dst,
len, PG_UTF8, encoding);
if (dst != convstr)
{
@ -1099,19 +1099,19 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
#ifdef HAVE_WCSTOMBS_L
/* Use wcstombs_l for nondefault locales */
result = wcstombs_l(to, from, tolen, locale);
#else /* !HAVE_WCSTOMBS_L */
#else /* !HAVE_WCSTOMBS_L */
/* We have to temporarily set the locale as current ... ugh */
locale_t save_locale = uselocale(locale);
result = wcstombs(to, from, tolen);
uselocale(save_locale);
#endif /* HAVE_WCSTOMBS_L */
#else /* !HAVE_LOCALE_T */
#endif /* HAVE_WCSTOMBS_L */
#else /* !HAVE_LOCALE_T */
/* Can't have locale != 0 without HAVE_LOCALE_T */
elog(ERROR, "wcstombs_l is not available");
result = 0; /* keep compiler quiet */
#endif /* HAVE_LOCALE_T */
#endif /* HAVE_LOCALE_T */
}
return result;
@ -1174,19 +1174,19 @@ char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen,
#ifdef HAVE_WCSTOMBS_L
/* Use mbstowcs_l for nondefault locales */
result = mbstowcs_l(to, str, tolen, locale);
#else /* !HAVE_WCSTOMBS_L */
#else /* !HAVE_WCSTOMBS_L */
/* We have to temporarily set the locale as current ... ugh */
locale_t save_locale = uselocale(locale);
result = mbstowcs(to, str, tolen);
uselocale(save_locale);
#endif /* HAVE_WCSTOMBS_L */
#else /* !HAVE_LOCALE_T */
#endif /* HAVE_WCSTOMBS_L */
#else /* !HAVE_LOCALE_T */
/* Can't have locale != 0 without HAVE_LOCALE_T */
elog(ERROR, "mbstowcs_l is not available");
result = 0; /* keep compiler quiet */
#endif /* HAVE_LOCALE_T */
result = 0; /* keep compiler quiet */
#endif /* HAVE_LOCALE_T */
}
pfree(str);
@ -1213,4 +1213,4 @@ char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen,
return result;
}
#endif /* USE_WIDE_UPPER_LOWER */
#endif /* USE_WIDE_UPPER_LOWER */
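The two hunks above only reflow comments in wchar2char()/char2wchar(), but the pattern they document is worth spelling out: use the _l variant of the conversion function when the platform provides it, otherwise temporarily install the target locale with uselocale() around the plain call. Below is a minimal sketch of that pattern, assuming a POSIX-2008 locale_t and a configure-style HAVE_WCSTOMBS_L macro; the function name is illustrative and this is not the backend code.

#include <stdlib.h>
#include <wchar.h>
#include <locale.h>

static size_t
wide_to_multibyte(char *dst, const wchar_t *src, size_t dstlen, locale_t loc)
{
#ifdef HAVE_WCSTOMBS_L
	/* locale-aware variant: no per-thread locale state is touched */
	return wcstombs_l(dst, src, dstlen, loc);
#else
	/* temporarily make "loc" the calling thread's locale ... ugh */
	locale_t	save = uselocale(loc);
	size_t		result = wcstombs(dst, src, dstlen);

	uselocale(save);
	return result;
#endif
}

uselocale() changes only the calling thread's locale, which is why the fallback branch restores the previous value immediately after the conversion.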

View File

@ -83,7 +83,7 @@
#define RIAttName(rel, attnum) NameStr(*attnumAttName(rel, attnum))
#define RIAttType(rel, attnum) attnumTypeId(rel, attnum)
#define RIAttCollation(rel, attnum) attnumCollationId(rel, attnum)
#define RIAttCollation(rel, attnum) attnumCollationId(rel, attnum)
#define RI_TRIGTYPE_INSERT 1
#define RI_TRIGTYPE_UPDATE 2
@ -3024,8 +3024,8 @@ ri_GenerateQualCollation(StringInfo buf, Oid collation)
collname = NameStr(colltup->collname);
/*
* We qualify the name always, for simplicity and to ensure the query
* is not search-path-dependent.
* We qualify the name always, for simplicity and to ensure the query is
* not search-path-dependent.
*/
quoteOneName(onename, get_namespace_name(colltup->collnamespace));
appendStringInfo(buf, " COLLATE %s", onename);
@ -3964,8 +3964,8 @@ ri_AttributesEqual(Oid eq_opr, Oid typeid,
}
/*
* Apply the comparison operator. We assume it doesn't
* care about collations.
* Apply the comparison operator. We assume it doesn't care about
* collations.
*/
return DatumGetBool(FunctionCall2(&entry->eq_opr_finfo,
oldvalue, newvalue));

View File

@ -5193,8 +5193,8 @@ get_rule_expr(Node *node, deparse_context *context,
if (caseexpr->arg)
{
/*
* The parser should have produced WHEN clauses of
* the form "CaseTestExpr = RHS", possibly with an
* The parser should have produced WHEN clauses of the
* form "CaseTestExpr = RHS", possibly with an
* implicit coercion inserted above the CaseTestExpr.
* For accurate decompilation of rules it's essential
* that we show just the RHS. However in an

View File

@ -291,12 +291,12 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/* be careful to apply operator right way 'round */
if (varonleft)
match = DatumGetBool(FunctionCall2Coll(&eqproc,
DEFAULT_COLLATION_OID,
DEFAULT_COLLATION_OID,
values[i],
constval));
else
match = DatumGetBool(FunctionCall2Coll(&eqproc,
DEFAULT_COLLATION_OID,
DEFAULT_COLLATION_OID,
constval,
values[i]));
if (match)
@ -1185,7 +1185,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
}
/*
* Divide pattern into fixed prefix and remainder. XXX we have to assume
* Divide pattern into fixed prefix and remainder. XXX we have to assume
* default collation here, because we don't have access to the actual
* input collation for the operator. FIXME ...
*/
@ -2403,9 +2403,9 @@ eqjoinsel_semi(Oid operator,
* before doing the division.
*
* Crude as the above is, it's completely useless if we don't have
* reliable ndistinct values for both sides. Hence, if either nd1
* or nd2 is default, punt and assume half of the uncertain rows
* have join partners.
* reliable ndistinct values for both sides. Hence, if either nd1 or
* nd2 is default, punt and assume half of the uncertain rows have
* join partners.
*/
if (nd1 != DEFAULT_NUM_DISTINCT && nd2 != DEFAULT_NUM_DISTINCT)
{
@ -4779,7 +4779,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
* worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
@ -4823,7 +4823,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
int pos,
match_pos;
bool is_multibyte = (pg_database_encoding_max_length() > 1);
pg_locale_t locale = 0;
pg_locale_t locale = 0;
bool locale_is_c = false;
/* the right-hand const is type text or bytea */
@ -4834,7 +4834,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("case insensitive matching not supported on type bytea")));
errmsg("case insensitive matching not supported on type bytea")));
/* If case-insensitive, we need locale info */
if (lc_ctype_is_c(collation))
@ -4891,7 +4891,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
/* Stop if case-varying character (it's sort of a wildcard) */
if (case_insensitive &&
pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
break;
match[match_pos++] = patt[pos];
@ -4938,7 +4938,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
char *rest;
Oid typeid = patt_const->consttype;
bool is_multibyte = (pg_database_encoding_max_length() > 1);
pg_locale_t locale = 0;
pg_locale_t locale = 0;
bool locale_is_c = false;
/*
@ -5050,7 +5050,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
/* Stop if case-varying character (it's sort of a wildcard) */
if (case_insensitive &&
pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c))
break;
/*

View File

@ -3829,7 +3829,7 @@ text_format(PG_FUNCTION_ARGS)
if (*cp < '0' || *cp > '9')
{
++arg;
if (arg <= 0) /* overflow? */
if (arg <= 0) /* overflow? */
{
/*
* Should not happen, as you can't pass billions of arguments
@ -3848,9 +3848,9 @@ text_format(PG_FUNCTION_ARGS)
arg = 0;
do
{
int newarg = arg * 10 + (*cp - '0');
int newarg = arg * 10 + (*cp - '0');
if (newarg / 10 != arg) /* overflow? */
if (newarg / 10 != arg) /* overflow? */
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("argument number is out of range")));
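The text_format() hunk above rewraps the overflow guard in the argument-number parser: after each newarg = arg * 10 + digit step, dividing back by ten must reproduce the previous accumulator, otherwise the multiplication wrapped. A standalone sketch of the same idiom follows; the names are illustrative, and, like the original, it relies on the usual wraparound behavior of signed int rather than anything the C standard guarantees.

#include <stdio.h>
#include <stdbool.h>

static bool
parse_arg_number(const char *cp, int *argnum)
{
	int			arg = 0;

	while (*cp >= '0' && *cp <= '9')
	{
		int			newarg = arg * 10 + (*cp - '0');

		if (newarg / 10 != arg)	/* overflow? */
			return false;
		arg = newarg;
		cp++;
	}
	*argnum = arg;
	return true;
}

int
main(void)
{
	int			n;

	printf("%d\n", parse_arg_number("42", &n) ? n : -1);	/* prints 42 */
	printf("%d\n", parse_arg_number("99999999999", &n) ? n : -1);	/* prints -1 */
	return 0;
}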

View File

@ -11,7 +11,7 @@
*
* Several seemingly-odd choices have been made to support use of the type
* cache by generic array and record handling routines, such as array_eq(),
* record_cmp(), and hash_array(). Because those routines are used as index
* record_cmp(), and hash_array(). Because those routines are used as index
* support operations, they cannot leak memory. To allow them to execute
* efficiently, all information that they would like to re-use across calls
* is kept in the type cache.
@ -276,7 +276,7 @@ lookup_type_cache(Oid type_id, int flags)
if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
typentry->eq_opr == InvalidOid)
{
Oid eq_opr = InvalidOid;
Oid eq_opr = InvalidOid;
if (typentry->btree_opf != InvalidOid)
eq_opr = get_opfamily_member(typentry->btree_opf,
@ -291,10 +291,10 @@ lookup_type_cache(Oid type_id, int flags)
HTEqualStrategyNumber);
/*
* If the proposed equality operator is array_eq or record_eq,
* check to see if the element type or column types support equality.
* If not, array_eq or record_eq would fail at runtime, so we don't
* want to report that the type has equality.
* If the proposed equality operator is array_eq or record_eq, check
* to see if the element type or column types support equality. If
* not, array_eq or record_eq would fail at runtime, so we don't want
* to report that the type has equality.
*/
if (eq_opr == ARRAY_EQ_OP &&
!array_element_has_equality(typentry))
@ -315,7 +315,7 @@ lookup_type_cache(Oid type_id, int flags)
}
if ((flags & TYPECACHE_LT_OPR) && typentry->lt_opr == InvalidOid)
{
Oid lt_opr = InvalidOid;
Oid lt_opr = InvalidOid;
if (typentry->btree_opf != InvalidOid)
lt_opr = get_opfamily_member(typentry->btree_opf,
@ -335,7 +335,7 @@ lookup_type_cache(Oid type_id, int flags)
}
if ((flags & TYPECACHE_GT_OPR) && typentry->gt_opr == InvalidOid)
{
Oid gt_opr = InvalidOid;
Oid gt_opr = InvalidOid;
if (typentry->btree_opf != InvalidOid)
gt_opr = get_opfamily_member(typentry->btree_opf,
@ -356,7 +356,7 @@ lookup_type_cache(Oid type_id, int flags)
if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
typentry->cmp_proc == InvalidOid)
{
Oid cmp_proc = InvalidOid;
Oid cmp_proc = InvalidOid;
if (typentry->btree_opf != InvalidOid)
cmp_proc = get_opfamily_proc(typentry->btree_opf,
@ -377,7 +377,7 @@ lookup_type_cache(Oid type_id, int flags)
if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
typentry->hash_proc == InvalidOid)
{
Oid hash_proc = InvalidOid;
Oid hash_proc = InvalidOid;
/*
* We insist that the eq_opr, if one has been determined, match the
@ -460,7 +460,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
{
Relation rel;
if (!OidIsValid(typentry->typrelid)) /* should not happen */
if (!OidIsValid(typentry->typrelid)) /* should not happen */
elog(ERROR, "invalid typrelid for composite type %u",
typentry->type_id);
rel = relation_open(typentry->typrelid, AccessShareLock);
@ -468,9 +468,9 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
* refcounted descriptor). We don't use IncrTupleDescRefCount() for
* this, because the reference mustn't be entered in the current
* resource owner; it can outlive the current query.
* refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
* because the reference mustn't be entered in the current resource owner;
* it can outlive the current query.
*/
typentry->tupDesc = RelationGetDescr(rel);
@ -520,7 +520,7 @@ array_element_has_hashing(TypeCacheEntry *typentry)
static void
cache_array_element_properties(TypeCacheEntry *typentry)
{
Oid elem_type = get_base_element_type(typentry->type_id);
Oid elem_type = get_base_element_type(typentry->type_id);
if (OidIsValid(elem_type))
{
@ -571,7 +571,7 @@ cache_record_field_properties(TypeCacheEntry *typentry)
{
TupleDesc tupdesc;
int newflags;
int i;
int i;
/* Fetch composite type's tupdesc if we don't have it already */
if (typentry->tupDesc == NULL)

View File

@ -1053,7 +1053,7 @@ DirectFunctionCall2Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2)
Datum
DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum arg3)
Datum arg3)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1078,7 +1078,7 @@ DirectFunctionCall3Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum
DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4)
Datum arg3, Datum arg4)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1105,7 +1105,7 @@ DirectFunctionCall4Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum
DirectFunctionCall5Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5)
Datum arg3, Datum arg4, Datum arg5)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1134,8 +1134,8 @@ DirectFunctionCall5Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum
DirectFunctionCall6Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1166,8 +1166,8 @@ DirectFunctionCall6Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum
DirectFunctionCall7Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1200,8 +1200,8 @@ DirectFunctionCall7Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum
DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1236,9 +1236,9 @@ DirectFunctionCall8Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum
DirectFunctionCall9Coll(PGFunction func, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1327,7 +1327,7 @@ FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
Datum
FunctionCall3Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum arg3)
Datum arg3)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1352,7 +1352,7 @@ FunctionCall3Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum
FunctionCall4Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4)
Datum arg3, Datum arg4)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1379,7 +1379,7 @@ FunctionCall4Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum
FunctionCall5Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5)
Datum arg3, Datum arg4, Datum arg5)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1408,8 +1408,8 @@ FunctionCall5Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum
FunctionCall6Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1440,8 +1440,8 @@ FunctionCall6Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum
FunctionCall7Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1474,8 +1474,8 @@ FunctionCall7Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum
FunctionCall8Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1510,9 +1510,9 @@ FunctionCall8Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum
FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
{
FunctionCallInfoData fcinfo;
Datum result;
@ -1625,7 +1625,7 @@ OidFunctionCall2Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2)
Datum
OidFunctionCall3Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum arg3)
Datum arg3)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
@ -1653,7 +1653,7 @@ OidFunctionCall3Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum
OidFunctionCall4Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4)
Datum arg3, Datum arg4)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
@ -1683,7 +1683,7 @@ OidFunctionCall4Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum
OidFunctionCall5Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5)
Datum arg3, Datum arg4, Datum arg5)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
@ -1715,8 +1715,8 @@ OidFunctionCall5Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum
OidFunctionCall6Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
@ -1750,8 +1750,8 @@ OidFunctionCall6Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum
OidFunctionCall7Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
@ -1787,8 +1787,8 @@ OidFunctionCall7Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum
OidFunctionCall8Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
@ -1826,9 +1826,9 @@ OidFunctionCall8Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum
OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
{
FmgrInfo flinfo;
FunctionCallInfoData fcinfo;

View File

@ -630,9 +630,9 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
*/
if (IsBinaryUpgrade && !am_superuser)
{
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to connect in binary upgrade mode")));
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to connect in binary upgrade mode")));
}
/*

View File

@ -1086,7 +1086,7 @@ setup_config(void)
"@authcomment@",
strcmp(authmethod, "trust") ? "" : AUTHTRUST_WARNING);
/* Replace username for replication */
/* Replace username for replication */
conflines = replace_token(conflines,
"@default_username@",
username);
@ -1663,7 +1663,7 @@ setup_collation(void)
*/
if (normalize_locale_name(alias, localebuf))
PG_CMD_PRINTF3("INSERT INTO tmp_pg_collation VALUES (E'%s', E'%s', %d);\n",
escape_quotes(alias), quoted_locale, enc);
escape_quotes(alias), quoted_locale, enc);
}
/* Add an SQL-standard name */
@ -1688,7 +1688,7 @@ setup_collation(void)
" encoding, locale, locale "
" FROM tmp_pg_collation"
" WHERE NOT EXISTS (SELECT 1 FROM pg_collation WHERE collname = tmp_pg_collation.collname)"
" ORDER BY collname, encoding, (collname = locale) DESC, locale;\n");
" ORDER BY collname, encoding, (collname = locale) DESC, locale;\n");
pclose(locale_a_handle);
PG_CMD_CLOSE;
@ -1702,7 +1702,7 @@ setup_collation(void)
#else /* not HAVE_LOCALE_T && not WIN32 */
printf(_("not supported on this platform\n"));
fflush(stdout);
#endif /* not HAVE_LOCALE_T && not WIN32*/
#endif /* not HAVE_LOCALE_T && not WIN32 */
}
/*
@ -2272,20 +2272,19 @@ check_locale_encoding(const char *locale, int user_enc)
static void
strreplace(char *str, char *needle, char *replacement)
{
char *s;
char *s;
s = strstr(str, needle);
if (s != NULL)
{
int replacementlen = strlen(replacement);
char *rest = s + strlen(needle);
int replacementlen = strlen(replacement);
char *rest = s + strlen(needle);
memcpy(s, replacement, replacementlen);
memmove(s + replacementlen, rest, strlen(rest) + 1);
}
}
#endif /* WIN32 */
#endif /* WIN32 */
/*
* Windows has a problem with locale names that have a dot in the country
@ -2306,6 +2305,7 @@ localemap(char *locale)
locale = xstrdup(locale);
#ifdef WIN32
/*
* Map the full country name to an abbreviation that setlocale() accepts.
*
@ -2321,14 +2321,14 @@ localemap(char *locale)
/*
* The ISO-3166 country code for Macau S.A.R. is MAC, but Windows doesn't
* seem to recognize that. And Macau isn't listed in the table of
* accepted abbreviations linked above.
* seem to recognize that. And Macau isn't listed in the table of accepted
* abbreviations linked above.
*
* Fortunately, "ZHM" seems to be accepted as an alias for
* "Chinese (Traditional)_Macau S.A.R..950", so we use that. Note that
* it's unlike HKG and ARE, ZHM is an alias for the whole locale name,
* not just the country part. I'm not sure where that "ZHM" comes from,
* must be some legacy naming scheme. But hey, it works.
* Fortunately, "ZHM" seems to be accepted as an alias for "Chinese
* (Traditional)_Macau S.A.R..950", so we use that. Note that it's unlike
* HKG and ARE, ZHM is an alias for the whole locale name, not just the
* country part. I'm not sure where that "ZHM" comes from, must be some
* legacy naming scheme. But hey, it works.
*
* Some versions of Windows spell it "Macau", others "Macao".
*/
@ -2336,7 +2336,7 @@ localemap(char *locale)
strreplace(locale, "Chinese_Macau S.A.R..950", "ZHM");
strreplace(locale, "Chinese (Traditional)_Macao S.A.R..950", "ZHM");
strreplace(locale, "Chinese_Macao S.A.R..950", "ZHM");
#endif /* WIN32 */
#endif /* WIN32 */
return locale;
}
@ -3000,13 +3000,13 @@ main(int argc, char *argv[])
else if (!pg_valid_server_encoding_id(ctype_enc))
{
/*
* We recognized it, but it's not a legal server encoding.
* On Windows, UTF-8 works with any locale, so we can fall back
* to UTF-8.
* We recognized it, but it's not a legal server encoding. On
* Windows, UTF-8 works with any locale, so we can fall back to
* UTF-8.
*/
#ifdef WIN32
printf(_("Encoding %s implied by locale is not allowed as a server-side encoding.\n"
"The default database encoding will be set to %s instead.\n"),
"The default database encoding will be set to %s instead.\n"),
pg_encoding_to_char(ctype_enc),
pg_encoding_to_char(PG_UTF8));
ctype_enc = PG_UTF8;

View File

@ -130,7 +130,7 @@ usage(void)
printf(_(" -Z, --compress=0-9 compress tar output with given compression level\n"));
printf(_("\nGeneral options:\n"));
printf(_(" -c, --checkpoint=fast|spread\n"
" set fast or spread checkpointing\n"));
" set fast or spread checkpointing\n"));
printf(_(" -l, --label=LABEL set backup label\n"));
printf(_(" -P, --progress show progress information\n"));
printf(_(" -v, --verbose output verbose messages\n"));
@ -1006,7 +1006,7 @@ main(int argc, char **argv)
#ifdef HAVE_LIBZ
compresslevel = Z_DEFAULT_COMPRESSION;
#else
compresslevel = 1; /* will be rejected below */
compresslevel = 1; /* will be rejected below */
#endif
break;
case 'Z':

View File

@ -370,9 +370,9 @@ start_postmaster(void)
* Since there might be quotes to handle here, it is easier simply to pass
* everything to a shell to process them.
*
* XXX it would be better to fork and exec so that we would know the
* child postmaster's PID directly; then test_postmaster_connection could
* use the PID without having to rely on reading it back from the pidfile.
* XXX it would be better to fork and exec so that we would know the child
* postmaster's PID directly; then test_postmaster_connection could use
* the PID without having to rely on reading it back from the pidfile.
*/
if (log_file != NULL)
snprintf(cmd, MAXPGPATH, SYSTEMQUOTE "\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &" SYSTEMQUOTE,
@ -479,7 +479,7 @@ test_postmaster_connection(bool do_checkpoint)
time_t pmstart;
/*
* Make sanity checks. If it's for a standalone backend
* Make sanity checks. If it's for a standalone backend
* (negative PID), or the recorded start time is before
* pg_ctl started, then either we are looking at the wrong
* data directory, or this is a pre-existing pidfile that
@ -492,8 +492,8 @@ test_postmaster_connection(bool do_checkpoint)
if (pmpid <= 0 || pmstart < start_time - 2)
{
/*
* Set flag to report stale pidfile if it doesn't
* get overwritten before we give up waiting.
* Set flag to report stale pidfile if it doesn't get
* overwritten before we give up waiting.
*/
found_stale_pidfile = true;
}
@ -552,7 +552,7 @@ test_postmaster_connection(bool do_checkpoint)
* timeout first.
*/
snprintf(connstr, sizeof(connstr),
"dbname=postgres port=%d host='%s' connect_timeout=5",
"dbname=postgres port=%d host='%s' connect_timeout=5",
portnum, host_str);
}
}
@ -570,11 +570,11 @@ test_postmaster_connection(bool do_checkpoint)
/*
* The postmaster should create postmaster.pid very soon after being
* started. If it's not there after we've waited 5 or more seconds,
* assume startup failed and give up waiting. (Note this covers
* both cases where the pidfile was never created, and where it was
* created and then removed during postmaster exit.) Also, if there
* *is* a file there but it appears stale, issue a suitable warning
* and give up waiting.
* assume startup failed and give up waiting. (Note this covers both
* cases where the pidfile was never created, and where it was created
* and then removed during postmaster exit.) Also, if there *is* a
* file there but it appears stale, issue a suitable warning and give
* up waiting.
*/
if (i >= 5)
{
@ -593,7 +593,7 @@ test_postmaster_connection(bool do_checkpoint)
/*
* If we've been able to identify the child postmaster's PID, check
* the process is still alive. This covers cases where the postmaster
* the process is still alive. This covers cases where the postmaster
* successfully created the pidfile but then crashed without removing
* it.
*/
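The pg_ctl hunks above reflow the comments around the pidfile sanity checks: a postmaster.pid recording a standalone backend (negative PID) or a start time earlier than when pg_ctl launched the postmaster is treated as stale or as belonging to the wrong data directory. A reduced sketch of just that test, with the same two-second allowance for clock granularity (not pg_ctl's actual code):

#include <time.h>
#include <stdbool.h>

static bool
pidfile_looks_stale(long pmpid, time_t pmstart, time_t start_time)
{
	/* negative PID = standalone backend; too-early start time = old pidfile */
	return pmpid <= 0 || pmstart < start_time - 2;
}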

View File

@ -104,7 +104,7 @@ typedef struct _restoreOptions
* restore */
int use_setsessauth;/* Use SET SESSION AUTHORIZATION commands
* instead of OWNER TO */
int no_security_labels; /* Skip security label entries */
int no_security_labels; /* Skip security label entries */
char *superuser; /* Username to use as superuser */
char *use_role; /* Issue SET ROLE to this */
int dataOnly;

View File

@ -7968,14 +7968,14 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
* collation does not matter for those.
*/
appendPQExpBuffer(query, "SELECT a.attname, "
"pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
"pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
"a.attlen, a.attalign, a.attisdropped, "
"CASE WHEN a.attcollation <> at.typcollation "
"THEN a.attcollation ELSE 0 END AS attcollation, "
"ct.typrelid "
"FROM pg_catalog.pg_type ct "
"JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
"LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
"JOIN pg_catalog.pg_attribute a ON a.attrelid = ct.typrelid "
"LEFT JOIN pg_catalog.pg_type at ON at.oid = a.atttypid "
"WHERE ct.oid = '%u'::pg_catalog.oid "
"ORDER BY a.attnum ",
tyinfo->dobj.catId.oid);
@ -7988,11 +7988,11 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
* always be false.
*/
appendPQExpBuffer(query, "SELECT a.attname, "
"pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
"pg_catalog.format_type(a.atttypid, a.atttypmod) AS atttypdefn, "
"a.attlen, a.attalign, a.attisdropped, "
"0 AS attcollation, "
"ct.typrelid "
"FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
"FROM pg_catalog.pg_type ct, pg_catalog.pg_attribute a "
"WHERE ct.oid = '%u'::pg_catalog.oid "
"AND a.attrelid = ct.typrelid "
"ORDER BY a.attnum ",
@ -8072,15 +8072,15 @@ dumpCompositeType(Archive *fout, TypeInfo *tyinfo)
{
/*
* This is a dropped attribute and we're in binary_upgrade mode.
* Insert a placeholder for it in the CREATE TYPE command, and
* set length and alignment with direct UPDATE to the catalogs
* Insert a placeholder for it in the CREATE TYPE command, and set
* length and alignment with direct UPDATE to the catalogs
* afterwards. See similar code in dumpTableSchema().
*/
appendPQExpBuffer(q, "%s INTEGER /* dummy */", fmtId(attname));
/* stash separately for insertion after the CREATE TYPE */
appendPQExpBuffer(dropped,
"\n-- For binary upgrade, recreate dropped column.\n");
"\n-- For binary upgrade, recreate dropped column.\n");
appendPQExpBuffer(dropped, "UPDATE pg_catalog.pg_attribute\n"
"SET attlen = %s, "
"attalign = '%s', attbyval = false\n"
@ -8380,8 +8380,8 @@ dumpProcLang(Archive *fout, ProcLangInfo *plang)
* However, for a language that belongs to an extension, we must not use
* the shouldDumpProcLangs heuristic, but just dump the language iff we're
* told to (via dobj.dump). Generally the support functions will belong
* to the same extension and so have the same dump flags ... if they don't,
* this might not work terribly nicely.
* to the same extension and so have the same dump flags ... if they
* don't, this might not work terribly nicely.
*/
useParams = (funcInfo != NULL &&
(inlineInfo != NULL || !OidIsValid(plang->laninline)) &&
@ -11181,8 +11181,8 @@ dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo)
return;
/*
* FDWs that belong to an extension are dumped based on their "dump" field.
* Otherwise omit them if we are only dumping some specific object.
* FDWs that belong to an extension are dumped based on their "dump"
* field. Otherwise omit them if we are only dumping some specific object.
*/
if (!fdwinfo->dobj.ext_member)
if (!include_everything)
@ -11963,7 +11963,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
if (binary_upgrade)
binary_upgrade_set_type_oids_by_rel_oid(q,
tbinfo->dobj.catId.oid);
tbinfo->dobj.catId.oid);
/* Is it a table or a view? */
if (tbinfo->relkind == RELKIND_VIEW)
@ -12085,6 +12085,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
"UNLOGGED " : "",
reltypename,
fmtId(tbinfo->dobj.name));
/*
* In case of a binary upgrade, we dump the table normally and attach
* it to the type afterward.

View File

@ -1746,7 +1746,7 @@ describeOneTableDetails(const char *schemaname,
{
printfPQExpBuffer(&buf,
"SELECT conname,\n"
" pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
" pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
"FROM pg_catalog.pg_constraint r\n"
"WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1",
oid);
@ -2693,7 +2693,7 @@ listDomains(const char *pattern, bool showSystem)
printfPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" t.typname as \"%s\",\n"
" pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
" pg_catalog.format_type(t.typbasetype, t.typtypmod) as \"%s\",\n"
" TRIM(LEADING\n",
gettext_noop("Schema"),
gettext_noop("Name"),
@ -2703,7 +2703,7 @@ listDomains(const char *pattern, bool showSystem)
" COALESCE((SELECT ' collate ' || c.collname FROM pg_catalog.pg_collation c, pg_catalog.pg_type bt\n"
" WHERE c.oid = t.typcollation AND bt.oid = t.typbasetype AND t.typcollation <> bt.typcollation), '') ||\n");
appendPQExpBuffer(&buf,
" CASE WHEN t.typnotnull THEN ' not null' ELSE '' END ||\n"
" CASE WHEN t.typnotnull THEN ' not null' ELSE '' END ||\n"
" CASE WHEN t.typdefault IS NOT NULL THEN ' default ' || t.typdefault ELSE '' END\n"
" ) as \"%s\",\n",
gettext_noop("Modifier"));

View File

@ -192,11 +192,11 @@ main(int argc, char *argv[])
appendPQExpBuffer(&sql, ";\n");
/*
* Connect to the 'postgres' database by default, except have
* the 'postgres' user use 'template1' so he can create the
* 'postgres' database.
*/
/*
* Connect to the 'postgres' database by default, except have the
* 'postgres' user use 'template1' so he can create the 'postgres'
* database.
*/
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);

View File

@ -113,11 +113,10 @@ main(int argc, char *argv[])
appendPQExpBuffer(&sql, "DROP DATABASE %s;\n",
fmtId(dbname));
/*
* Connect to the 'postgres' database by default, except have
* the 'postgres' user use 'template1' so he can drop the
* 'postgres' database.
*/
/*
* Connect to the 'postgres' database by default, except have the
* 'postgres' user use 'template1' so he can drop the 'postgres' database.
*/
conn = connectDatabase(strcmp(dbname, "postgres") == 0 ? "template1" : "postgres",
host, port, username, prompt_password, progname);

View File

@ -433,70 +433,70 @@ extern int no_such_variable
* are allowed to be NULL.
*/
extern Datum DirectFunctionCall1Coll(PGFunction func, Oid collation,
Datum arg1);
Datum arg1);
extern Datum DirectFunctionCall2Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2);
Datum arg1, Datum arg2);
extern Datum DirectFunctionCall3Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2,
Datum arg3);
Datum arg1, Datum arg2,
Datum arg3);
extern Datum DirectFunctionCall4Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4);
extern Datum DirectFunctionCall5Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5);
extern Datum DirectFunctionCall6Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6);
extern Datum DirectFunctionCall7Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7);
extern Datum DirectFunctionCall8Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8);
extern Datum DirectFunctionCall9Coll(PGFunction func, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9);
/* These are for invocation of a previously-looked-up function with a
* directly-computed parameter list. Note that neither arguments nor result
* are allowed to be NULL.
*/
extern Datum FunctionCall1Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1);
Datum arg1);
extern Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2);
Datum arg1, Datum arg2);
extern Datum FunctionCall3Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2,
Datum arg3);
Datum arg1, Datum arg2,
Datum arg3);
extern Datum FunctionCall4Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4);
extern Datum FunctionCall5Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5);
extern Datum FunctionCall6Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6);
extern Datum FunctionCall7Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7);
extern Datum FunctionCall8Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8);
extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9);
/* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
@ -506,35 +506,35 @@ extern Datum FunctionCall9Coll(FmgrInfo *flinfo, Oid collation,
*/
extern Datum OidFunctionCall0Coll(Oid functionId, Oid collation);
extern Datum OidFunctionCall1Coll(Oid functionId, Oid collation,
Datum arg1);
Datum arg1);
extern Datum OidFunctionCall2Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2);
Datum arg1, Datum arg2);
extern Datum OidFunctionCall3Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2,
Datum arg3);
Datum arg1, Datum arg2,
Datum arg3);
extern Datum OidFunctionCall4Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4);
extern Datum OidFunctionCall5Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5);
extern Datum OidFunctionCall6Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6);
extern Datum OidFunctionCall7Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7);
extern Datum OidFunctionCall8Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8);
extern Datum OidFunctionCall9Coll(Oid functionId, Oid collation,
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9);
Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9);
/* These macros allow the collation argument to be omitted (with a default of
* InvalidOid, ie, no collation). They exist mostly for backwards

View File

@ -725,7 +725,7 @@ typedef struct RangeTblEntry
*
* If the function returns RECORD, funccoltypes lists the column types
* declared in the RTE's column type specification, funccoltypmods lists
* their declared typmods, funccolcollations their collations. Otherwise,
* their declared typmods, funccolcollations their collations. Otherwise,
* those fields are NIL.
*/
Node *funcexpr; /* expression tree for func call */

View File

@ -50,7 +50,7 @@ extern List *make_ands_implicit(Expr *clause);
extern bool contain_agg_clause(Node *clause);
extern List *pull_agg_clause(Node *clause);
extern void count_agg_clauses(PlannerInfo *root, Node *clause,
AggClauseCosts *costs);
AggClauseCosts *costs);
extern bool contain_window_function(Node *clause);
extern WindowFuncLists *find_window_functions(Node *clause, Index maxWinRef);

View File

@ -92,7 +92,7 @@ typedef struct HASHCTL
#define HASH_CONTEXT 0x200 /* Set memory allocation context */
#define HASH_COMPARE 0x400 /* Set user defined comparison function */
#define HASH_KEYCOPY 0x800 /* Set user defined key-copying function */
#define HASH_FIXED_SIZE 0x1000 /* Initial size is a hard limit */
#define HASH_FIXED_SIZE 0x1000 /* Initial size is a hard limit */
/* max_dsize value to indicate expansible directory */

View File

@ -136,7 +136,7 @@ extern Pattern_Prefix_Status pattern_fixed_prefix(Const *patt,
Const **prefix,
Const **rest);
extern Const *make_greater_string(const Const *str_const, FmgrInfo *ltproc,
Oid collation);
Oid collation);
extern Datum eqsel(PG_FUNCTION_ARGS);
extern Datum neqsel(PG_FUNCTION_ARGS);

View File

@ -228,8 +228,12 @@ ecpg_build_compat_sqlda(int line, PGresult *res, int row, enum COMPAT_MODE compa
strcpy(fname, PQfname(res, i));
sqlda->sqlvar[i].sqlname = fname;
fname += strlen(sqlda->sqlvar[i].sqlname) + 1;
/* this is reserved for future use, so we leave it empty for the time being */
/* sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i);*/
/*
* this is reserved for future use, so we leave it empty for the time
* being
*/
/* sqlda->sqlvar[i].sqlformat = (char *) (long) PQfformat(res, i); */
sqlda->sqlvar[i].sqlxid = PQftype(res, i);
sqlda->sqlvar[i].sqltypelen = PQfsize(res, i);
}

View File

@ -503,7 +503,7 @@ dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm * tm,
case 'G':
{
/* Keep compiler quiet - Don't use a literal format */
const char *fmt = "%G";
const char *fmt = "%G";
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, fmt, tm);
@ -689,7 +689,7 @@ dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm * tm,
case 'V':
{
/* Keep compiler quiet - Don't use a literal format */
const char *fmt = "%V";
const char *fmt = "%V";
i = strftime(q, *pstr_len, fmt, tm);
if (i == 0)

View File

@ -325,7 +325,7 @@ pg_GSS_error_int(PQExpBuffer str, const char *mprefix,
do
{
gss_display_status(&lmin_s, stat, type,
GSS_C_NO_OID, &msg_ctx, &lmsg);
GSS_C_NO_OID, &msg_ctx, &lmsg);
appendPQExpBuffer(str, "%s: %s\n", mprefix, (char *) lmsg.value);
gss_release_buffer(&lmin_s, &lmsg);
} while (msg_ctx);
@ -693,9 +693,9 @@ pg_local_sendauth(PGconn *conn)
struct cmsghdr *cmsg;
union
{
struct cmsghdr hdr;
unsigned char buf[CMSG_SPACE(sizeof(struct cmsgcred))];
} cmsgbuf;
struct cmsghdr hdr;
unsigned char buf[CMSG_SPACE(sizeof(struct cmsgcred))];
} cmsgbuf;
/*
* The backend doesn't care what we send here, but it wants exactly one

View File

@ -1054,18 +1054,18 @@ connectFailureMessage(PGconn *conn, int errorno)
if ((conn->pghostaddr == NULL) &&
(conn->pghost == NULL || strcmp(conn->pghost, host_addr) != 0))
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not connect to server: %s\n"
"\tIs the server running on host \"%s\" (%s) and accepting\n"
"\tTCP/IP connections on port %s?\n"),
libpq_gettext("could not connect to server: %s\n"
"\tIs the server running on host \"%s\" (%s) and accepting\n"
"\tTCP/IP connections on port %s?\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
displayed_host,
host_addr,
conn->pgport);
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not connect to server: %s\n"
"\tIs the server running on host \"%s\" and accepting\n"
"\tTCP/IP connections on port %s?\n"),
libpq_gettext("could not connect to server: %s\n"
"\tIs the server running on host \"%s\" and accepting\n"
"\tTCP/IP connections on port %s?\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
displayed_host,
conn->pgport);
@ -1854,6 +1854,7 @@ keep_going: /* We will come back to here until there is
int packetlen;
#ifdef HAVE_UNIX_SOCKETS
/*
* Implement requirepeer check, if requested and it's a
* Unix-domain socket.
@ -1870,14 +1871,17 @@ keep_going: /* We will come back to here until there is
errno = 0;
if (getpeereid(conn->sock, &uid, &gid) != 0)
{
/* Provide special error message if getpeereid is a stub */
/*
* Provide special error message if getpeereid is a
* stub
*/
if (errno == ENOSYS)
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("requirepeer parameter is not supported on this platform\n"));
else
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not get peer credentials: %s\n"),
pqStrerror(errno, sebuf, sizeof(sebuf)));
pqStrerror(errno, sebuf, sizeof(sebuf)));
goto error_return;
}
@ -1899,7 +1903,7 @@ keep_going: /* We will come back to here until there is
goto error_return;
}
}
#endif /* HAVE_UNIX_SOCKETS */
#endif /* HAVE_UNIX_SOCKETS */
#ifdef USE_SSL
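The libpq hunk above rewraps the requirepeer handling: on a Unix-domain socket, ask the kernel for the peer's effective uid with getpeereid() (with a special message when the call is only a stub returning ENOSYS) and compare the resulting user name against the expected one. A hedged sketch of that check, collapsed to a boolean and assuming a platform that provides getpeereid() (PostgreSQL ships a replacement where it is missing); this is not the libpq implementation.

#include <sys/types.h>
#include <unistd.h>
#include <pwd.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>

static bool
peer_matches(int sock, const char *required_user)
{
	uid_t		uid;
	gid_t		gid;
	struct passwd *pw;

	errno = 0;
	if (getpeereid(sock, &uid, &gid) != 0)
		return false;			/* errno == ENOSYS would mean "stub", as above */

	pw = getpwuid(uid);			/* map the peer's uid back to a user name */
	return pw != NULL && strcmp(pw->pw_name, required_user) == 0;
}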

View File

@ -1986,7 +1986,7 @@ plperl_call_perl_trigger_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo,
if (!TDsv)
elog(ERROR, "couldn't fetch $_TD");
save_item(TDsv); /* local $_TD */
save_item(TDsv); /* local $_TD */
sv_setsv(TDsv, td);
PUSHMARK(sp);
@ -3564,7 +3564,7 @@ hv_store_string(HV *hv, const char *key, SV *val)
* does not appear that hashes track UTF-8-ness of keys at all in Perl
* 5.6.
*/
hlen = - (int) strlen(hkey);
hlen = -(int) strlen(hkey);
ret = hv_store(hv, hkey, hlen, val, 0);
if (hkey != key)
@ -3589,7 +3589,7 @@ hv_fetch_string(HV *hv, const char *key)
GetDatabaseEncoding(), PG_UTF8);
/* See notes in hv_store_string */
hlen = - (int) strlen(hkey);
hlen = -(int) strlen(hkey);
ret = hv_fetch(hv, hkey, hlen, 0);
if (hkey != key)

View File

@ -59,13 +59,13 @@
#undef vsnprintf
#endif
#ifdef __GNUC__
#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
#define snprintf(...) pg_snprintf(__VA_ARGS__)
#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
#define snprintf(...) pg_snprintf(__VA_ARGS__)
#else
#define vsnprintf pg_vsnprintf
#define snprintf pg_snprintf
#endif /* __GNUC__ */
#endif /* USE_REPL_SNPRINTF */
#define vsnprintf pg_vsnprintf
#define snprintf pg_snprintf
#endif /* __GNUC__ */
#endif /* USE_REPL_SNPRINTF */
/* perl version and platform portability */
#define NEED_eval_pv

View File

@ -4400,7 +4400,7 @@ exec_get_datum_type_info(PLpgSQL_execstate *estate,
default:
elog(ERROR, "unrecognized dtype: %d", datum->dtype);
*typeid = InvalidOid; /* keep compiler quiet */
*typeid = InvalidOid; /* keep compiler quiet */
*typmod = -1;
*collation = InvalidOid;
break;

View File

@ -4512,8 +4512,8 @@ get_source_line(const char *src, int lineno)
/*
* Sanity check, next < s if the line was all-whitespace, which should
* never happen if Python reported a frame created on that line, but
* check anyway.
* never happen if Python reported a frame created on that line, but check
* anyway.
*/
if (next < s)
return NULL;
@ -4680,7 +4680,10 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
&tbstr, "\n PL/Python function \"%s\", line %ld, in %s",
proname, plain_lineno - 1, fname);
/* function code object was compiled with "<string>" as the filename */
/*
* function code object was compiled with "<string>" as the
* filename
*/
if (PLy_curr_procedure && plain_filename != NULL &&
strcmp(plain_filename, "<string>") == 0)
{

View File

@ -61,7 +61,7 @@ extern char *optarg;
#define BADARG (int)':'
#define EMSG ""
int getopt(int nargc, char *const * nargv, const char * ostr);
int getopt(int nargc, char *const * nargv, const char *ostr);
/*
* getopt
@ -74,7 +74,7 @@ int getopt(int nargc, char *const * nargv, const char * ostr);
* returning -1.)
*/
int
getopt(int nargc, char *const * nargv, const char * ostr)
getopt(int nargc, char *const * nargv, const char *ostr)
{
static char *place = EMSG; /* option letter processing */
char *oli; /* option letter list index */

View File

@ -69,7 +69,7 @@ getpeereid(int sock, uid_t *uid, gid_t *gid)
*gid = ucred_getegid(ucred);
ucred_free(ucred);
if (*uid == (uid_t)(-1) || *gid == (gid_t)(-1))
if (*uid == (uid_t) (-1) || *gid == (gid_t) (-1))
return -1;
return 0;
#else

View File

@ -81,8 +81,8 @@ inet_net_ntop(int af, const void *src, int bits, char *dst, size_t size)
* We need to cover both the address family constants used by the PG inet
* type (PGSQL_AF_INET and PGSQL_AF_INET6) and those used by the system
* libraries (AF_INET and AF_INET6). We can safely assume PGSQL_AF_INET
* == AF_INET, but the INET6 constants are very likely to be different.
* If AF_INET6 isn't defined, silently ignore it.
* == AF_INET, but the INET6 constants are very likely to be different. If
* AF_INET6 isn't defined, silently ignore it.
*/
switch (af)
{

View File

@ -23,7 +23,7 @@ pg_set_noblock(pgsocket sock)
#if !defined(WIN32)
return (fcntl(sock, F_SETFL, O_NONBLOCK) != -1);
#else
unsigned long ioctlsocket_ret = 1;
unsigned long ioctlsocket_ret = 1;
/* Returns non-0 on failure, while fcntl() returns -1 on failure */
return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
@ -42,7 +42,7 @@ pg_set_block(pgsocket sock)
return false;
return true;
#else
unsigned long ioctlsocket_ret = 0;
unsigned long ioctlsocket_ret = 0;
/* Returns non-0 on failure, while fcntl() returns -1 on failure */
return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);

View File

@ -2140,7 +2140,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc
#ifndef WIN32_ONLY_COMPILER
snprintf(buf, sizeof(buf),
SYSTEMQUOTE "\"%s\" -C \"%s/%s\" DESTDIR=\"%s/install\" install >> \"%s/log/install.log\" 2>&1" SYSTEMQUOTE,
makeprog, top_builddir, sl->str, temp_install, outputdir);
makeprog, top_builddir, sl->str, temp_install, outputdir);
#else
fprintf(stderr, _("\n%s: --extra-install option not supported on this platform\n"), progname);
exit_nicely(2);

View File

@ -1480,9 +1480,9 @@ pg_timezone_initialize(void)
* postgresql.conf, this code will not do what you might expect, namely
* call select_default_timezone() and install that value as the setting.
* Rather, the previously active setting --- typically the one from
* postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR.
* If we did try to install the "correct" default value, the effect would
* be that each postmaster child would independently run an extremely
* postgresql.conf --- will be reinstalled, relabeled as PGC_S_ENV_VAR. If
* we did try to install the "correct" default value, the effect would be
* that each postmaster child would independently run an extremely
* expensive search of the timezone database, bringing the database to its
* knees for possibly multiple seconds. This is so unpleasant, and could
* so easily be triggered quite unintentionally, that it seems better to