Run pgindent on 9.2 source tree in preparation for first 9.3 commit-fest.
Bruce Momjian 2012-06-10 15:20:04 -04:00
parent 60801944fa
commit 927d61eeff
494 changed files with 7343 additions and 7046 deletions
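For reference, a tree-wide pgindent pass like this one is driven from src/tools/pgindent. A minimal sketch of such a run, as an illustration only (the exact invocation has varied across releases and is an assumption here; it presumes the script's --typedefs option and the buildfarm's accumulated typedef list):

    # fetch the combined typedef list; pgindent needs to know which
    # identifiers are typedef names to lay out declarations correctly
    curl -o typedefs.list https://buildfarm.postgresql.org/cgi-bin/typedefs.pl
    # reindent the whole tree from the top of a source checkout
    src/tools/pgindent/pgindent --typedefs=typedefs.list .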

contrib/auto_explain/auto_explain.c

@ -23,7 +23,7 @@ static int auto_explain_log_min_duration = -1; /* msec or -1 */
static bool auto_explain_log_analyze = false;
static bool auto_explain_log_verbose = false;
static bool auto_explain_log_buffers = false;
static bool auto_explain_log_timing = false;
static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT;
static bool auto_explain_log_nested_statements = false;

contrib/dblink/dblink.c

@ -1140,7 +1140,7 @@ storeHandler(PGresult *res, const PGdataValue *columns,
* strings and add null termination. As a micro-optimization, allocate
* all the strings with one palloc.
*/
pbuflen = nfields; /* count the null terminators themselves */
for (i = 0; i < nfields; i++)
{
int len = columns[i].len;

contrib/file_fdw/file_fdw.c

@ -109,17 +109,17 @@ PG_FUNCTION_INFO_V1(file_fdw_validator);
* FDW callback routines
*/
static void fileGetForeignRelSize(PlannerInfo *root,
RelOptInfo *baserel,
Oid foreigntableid);
static void fileGetForeignPaths(PlannerInfo *root,
RelOptInfo *baserel,
Oid foreigntableid);
static ForeignScan *fileGetForeignPlan(PlannerInfo *root,
RelOptInfo *baserel,
Oid foreigntableid,
ForeignPath *best_path,
List *tlist,
List *scan_clauses);
static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es);
static void fileBeginForeignScan(ForeignScanState *node, int eflags);
static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node);
@ -141,7 +141,7 @@ static void estimate_size(PlannerInfo *root, RelOptInfo *baserel,
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
FileFdwPlanState *fdw_private,
Cost *startup_cost, Cost *total_cost);
static int file_acquire_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
@ -180,7 +180,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
Oid catalog = PG_GETARG_OID(1);
char *filename = NULL;
DefElem *force_not_null = NULL;
List *other_options = NIL;
ListCell *cell;
@ -233,7 +233,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
buf.len > 0
? errhint("Valid options in this context are: %s",
buf.data)
: errhint("There are no valid options in this context.")));
}
/*
@ -393,13 +393,13 @@ get_file_fdw_attribute_options(Oid relid)
options = GetForeignColumnOptions(relid, attnum);
foreach(lc, options)
{
DefElem *def = (DefElem *) lfirst(lc);
if (strcmp(def->defname, "force_not_null") == 0)
{
if (defGetBoolean(def))
{
char *attname = pstrdup(NameStr(attr->attname));
fnncolumns = lappend(fnncolumns, makeString(attname));
}
@ -429,8 +429,8 @@ fileGetForeignRelSize(PlannerInfo *root,
FileFdwPlanState *fdw_private;
/*
* Fetch options. We only need filename at this point, but we might as
* well get everything and not need to re-fetch it later in planning.
*/
fdw_private = (FileFdwPlanState *) palloc(sizeof(FileFdwPlanState));
fileGetOptions(foreigntableid,
@ -468,13 +468,14 @@ fileGetForeignPaths(PlannerInfo *root,
baserel->rows,
startup_cost,
total_cost,
NIL, /* no pathkeys */
NULL, /* no outer rel either */
NIL)); /* no fdw_private data */
/*
* If data file was sorted, and we knew it somehow, we could insert
* appropriate pathkeys into the ForeignPath node to tell the planner
* that.
*/
}
@ -505,8 +506,8 @@ fileGetForeignPlan(PlannerInfo *root,
return make_foreignscan(tlist,
scan_clauses,
scan_relid,
NIL, /* no expressions to evaluate */
NIL); /* no private state either */
}
/*
@ -665,14 +666,14 @@ fileAnalyzeForeignTable(Relation relation,
{
char *filename;
List *options;
struct stat stat_buf;
/* Fetch options of foreign table */
fileGetOptions(RelationGetRelid(relation), &filename, &options);
/*
* Get size of the file. (XXX if we fail here, would it be better to just
* return false to skip analyzing the table?)
*/
if (stat(filename, &stat_buf) < 0)
ereport(ERROR,
@ -746,7 +747,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel,
* planner's idea of the relation width; which is bogus if not all
* columns are being read, not to mention that the text representation
* of a row probably isn't the same size as its internal
* representation. Possibly we could do something better, but the
* real answer to anyone who complains is "ANALYZE" ...
*/
int tuple_width;
@ -811,7 +812,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
* which must have at least targrows entries.
* The actual number of rows selected is returned as the function result.
* We also count the total number of rows in the file and return it into
* *totalrows. Note that *totaldeadrows is always set to 0.
*
* Note that the returned list of rows is not always in order by physical
* position in the file. Therefore, correlation estimates derived later
@ -824,7 +825,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
double *totalrows, double *totaldeadrows)
{
int numrows = 0;
double rowstoskip = -1; /* -1 means not set yet */
double rstate;
TupleDesc tupDesc;
Datum *values;
@ -853,8 +854,8 @@ file_acquire_sample_rows(Relation onerel, int elevel,
cstate = BeginCopyFrom(onerel, filename, NIL, options);
/*
* Use per-tuple memory context to prevent leak of memory used to read
* rows from the file with Copy routines.
*/
tupcontext = AllocSetContextCreate(CurrentMemoryContext,
"file_fdw temporary context",
@ -912,10 +913,10 @@ file_acquire_sample_rows(Relation onerel, int elevel,
if (rowstoskip <= 0)
{
/*
* Found a suitable tuple, so save it, replacing one old tuple
* at random
*/
int k = (int) (targrows * anl_random_fract());
Assert(k >= 0 && k < targrows);
heap_freetuple(rows[k]);
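The sampler code touched above is file_fdw's row sampler: once targrows tuples have been collected, each later row replaces a randomly chosen reservoir slot (the k = targrows * anl_random_fract() line). As a standalone illustration of the underlying idea, here is basic reservoir sampling (Algorithm R) in plain C with hypothetical names; the real code uses Vitter's skip-ahead variant driven by rowstoskip and anl_get_next_S() rather than drawing a random number for every row:

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Keep a uniform random sample of up to cap values from a stream of n
     * values: fill the reservoir first, then replace a randomly chosen
     * old entry with decreasing probability.
     */
    static void
    reservoir_sample(const int *stream, int n, int *sample, int cap)
    {
        int i;

        for (i = 0; i < n; i++)
        {
            if (i < cap)
                sample[i] = stream[i];  /* reservoir not yet full */
            else
            {
                /* row i survives with probability cap/(i+1) */
                int k = rand() % (i + 1);

                if (k < cap)
                    sample[k] = stream[i]; /* replace one old entry at random */
            }
        }
    }

    int
    main(void)
    {
        int stream[100], sample[10], i;

        for (i = 0; i < 100; i++)
            stream[i] = i;
        reservoir_sample(stream, 100, sample, 10);
        for (i = 0; i < 10; i++)
            printf("%d ", sample[i]);
        putchar('\n');
        return 0;
    }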

contrib/pg_archivecleanup/pg_archivecleanup.c

@ -37,7 +37,7 @@ const char *progname;
/* Options and defaults */
bool debug = false; /* are we debugging? */
bool dryrun = false; /* are we performing a dry-run operation? */
char *additional_ext = NULL; /* Extension to remove from filenames */
char *archiveLocation; /* where to find the archive? */
char *restartWALFileName; /* the file from which we can restart restore */
@ -136,12 +136,13 @@ CleanupPriorWALFiles(void)
* they were originally written, in case this worries you.
*/
if (strlen(walfile) == XLOG_DATA_FNAME_LEN &&
strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
{
/*
* Use the original file name again now, including any
* extension that might have been chopped off before testing
* the sequence.
*/
snprintf(WALFilePath, MAXPGPATH, "%s/%s",
archiveLocation, xlde->d_name);
@ -150,7 +151,7 @@ CleanupPriorWALFiles(void)
{
/*
* Prints the name of the file to be removed and skips the
* actual removal. The regular printout is so that the
* user can pipe the output into some other program.
*/
printf("%s\n", WALFilePath);
@ -298,7 +299,8 @@ main(int argc, char **argv)
dryrun = true;
break;
case 'x':
additional_ext = optarg; /* Extension to remove from
* xlogfile names */
break;
default:
fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);

contrib/pg_stat_statements/pg_stat_statements.c

@ -103,19 +103,19 @@ typedef struct Counters
int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */
int64 rows; /* total # of retrieved or affected rows */
int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
int64 shared_blks_dirtied; /* # of shared disk blocks dirtied */
int64 shared_blks_written; /* # of shared disk blocks written */
int64 local_blks_hit; /* # of local buffer hits */
int64 local_blks_read; /* # of local disk blocks read */
int64 local_blks_dirtied; /* # of local disk blocks dirtied */
int64 local_blks_written; /* # of local disk blocks written */
int64 temp_blks_read; /* # of temp blocks read */
int64 temp_blks_written; /* # of temp blocks written */
double blk_read_time; /* time spent reading, in msec */
double blk_write_time; /* time spent writing, in msec */
double usage; /* usage factor */
} Counters;
/*
@ -140,7 +140,7 @@ typedef struct pgssSharedState
{
LWLockId lock; /* protects hashtable search/modification */
int query_size; /* max query length in bytes */
double cur_median_usage; /* current median usage in hashtable */
} pgssSharedState;
/*
@ -150,7 +150,7 @@ typedef struct pgssLocationLen
{
int location; /* start offset in query text */
int length; /* length in bytes, or -1 to ignore */
} pgssLocationLen;
/*
* Working state for computing a query jumble and producing a normalized
@ -172,7 +172,7 @@ typedef struct pgssJumbleState
/* Current number of valid entries in clocations array */
int clocations_count;
} pgssJumbleState;
/*---- Local variables ----*/
@ -248,21 +248,21 @@ static uint32 pgss_hash_string(const char *str);
static void pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows,
const BufferUsage *bufusage,
pgssJumbleState *jstate);
static Size pgss_memsize(void);
static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
int query_len, bool sticky);
static void entry_dealloc(void);
static void entry_reset(void);
static void AppendJumble(pgssJumbleState *jstate,
const unsigned char *item, Size size);
static void JumbleQuery(pgssJumbleState *jstate, Query *query);
static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
static void JumbleExpr(pgssJumbleState *jstate, Node *node);
static void RecordConstLocation(pgssJumbleState *jstate, int location);
static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding);
static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query);
static int comp_location(const void *a, const void *b);
@ -513,8 +513,8 @@ pgss_shmem_startup(void)
FreeFile(file);
/*
* Remove the file so it's not included in backups/replication slaves,
* etc. A new file will be written on next shutdown.
*/
unlink(PGSS_DUMP_FILE);
@ -600,7 +600,7 @@ error:
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not write pg_stat_statement file \"%s\": %m",
PGSS_DUMP_FILE ".tmp")));
if (file)
FreeFile(file);
unlink(PGSS_DUMP_FILE ".tmp");
@ -626,8 +626,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
* the statement contains an optimizable statement for which a queryId
* could be derived (such as EXPLAIN or DECLARE CURSOR). For such cases,
* runtime control will first go through ProcessUtility and then the
* executor, and we don't want the executor hooks to do anything, since we
* are already measuring the statement's costs at the utility level.
*/
if (query->utilityStmt)
{
@ -768,7 +768,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
pgss_store(queryDesc->sourceText,
queryId,
queryDesc->totaltime->total * 1000.0, /* convert to msec */
queryDesc->estate->es_processed,
&queryDesc->totaltime->bufusage,
NULL);
@ -789,10 +789,9 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
DestReceiver *dest, char *completionTag)
{
/*
* If it's an EXECUTE statement, we don't track it and don't increment the
* nesting level. This allows the cycles to be charged to the underlying
* PREPARE instead (by the Executor hooks), which is much more useful.
*
* We also don't track execution of PREPARE. If we did, we would get one
* hash table entry for the PREPARE (with hash calculated from the query
@ -942,7 +941,7 @@ static void
pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows,
const BufferUsage *bufusage,
pgssJumbleState *jstate)
{
pgssHashKey key;
pgssEntry *entry;
@ -1355,7 +1354,7 @@ entry_reset(void)
* the current jumble.
*/
static void
AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size)
{
unsigned char *jumble = jstate->jumble;
Size jumble_len = jstate->jumble_len;
@ -1404,7 +1403,7 @@ AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
* of information).
*/
static void
JumbleQuery(pgssJumbleState *jstate, Query *query)
{
Assert(IsA(query, Query));
Assert(query->utilityStmt == NULL);
@ -1431,7 +1430,7 @@ JumbleQuery(pgssJumbleState * jstate, Query *query)
* Jumble a range table
*/
static void
JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
{
ListCell *lc;
@ -1485,11 +1484,11 @@ JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
*
* Note: the reason we don't simply use expression_tree_walker() is that the
* point of that function is to support tree walkers that don't care about
* most tree node types, but here we care about all types. We should complain
* about any unrecognized node type.
*/
static void
JumbleExpr(pgssJumbleState *jstate, Node *node)
{
ListCell *temp;
@ -1874,7 +1873,7 @@ JumbleExpr(pgssJumbleState * jstate, Node *node)
* that is currently being walked.
*/
static void
RecordConstLocation(pgssJumbleState *jstate, int location)
{
/* -1 indicates unknown or undefined location */
if (location >= 0)
@ -1909,7 +1908,7 @@ RecordConstLocation(pgssJumbleState * jstate, int location)
* Returns a palloc'd string, which is not necessarily null-terminated.
*/
static char *
generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding)
{
char *norm_query;
@ -2007,7 +2006,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
* a problem.
*
* Duplicate constant pointers are possible, and will have their lengths
* marked as '-1', so that they are later ignored. (Actually, we assume the
* lengths were initialized as -1 to start with, and don't change them here.)
*
* N.B. There is an assumption that a '-' character at a Const location begins
@ -2015,7 +2014,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
* reason for a constant to start with a '-'.
*/
static void
fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
{
pgssLocationLen *locs;
core_yyscan_t yyscanner;
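Most of the pg_stat_statements changes above are in the query-jumbling code, which records each constant's offset while walking the post-analysis tree and later rewrites those constants to placeholders. A minimal standalone sketch of that rewrite step (hypothetical names and simplified allocation; the real generate_normalized_query() works from pgssLocationLen entries and handles encoding, palloc, and edge cases differently):

    #include <stdio.h>
    #include <string.h>

    typedef struct
    {
        int location;   /* byte offset of the constant in the query */
        int length;     /* its length in bytes */
    } ConstLoc;

    /*
     * Copy query into out, replacing each recorded constant with '?'.
     * locs must be sorted by location and non-overlapping; no overflow
     * handling, since this is only a sketch.
     */
    static void
    normalize_query(const char *query, const ConstLoc *locs, int nlocs,
                    char *out, size_t outsize)
    {
        size_t off = 0;
        int pos = 0, i;

        for (i = 0; i < nlocs; i++)
        {
            int len = locs[i].location - pos;

            memcpy(out + off, query + pos, len);
            off += len;
            out[off++] = '?';
            pos = locs[i].location + locs[i].length;
        }
        snprintf(out + off, outsize - off, "%s", query + pos);
    }

    int
    main(void)
    {
        ConstLoc locs[] = {{26, 2}, {37, 5}};   /* offsets of 42 and 'abc' */
        char buf[128];

        normalize_query("SELECT * FROM t WHERE a = 42 AND b = 'abc'",
                        locs, 2, buf, sizeof(buf));
        puts(buf);  /* prints: SELECT * FROM t WHERE a = ? AND b = ? */
        return 0;
    }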

contrib/pg_test_fsync/pg_test_fsync.c

@ -29,7 +29,7 @@
/* These are macros to avoid timing the function call overhead. */
#ifndef WIN32
#define START_TIMER \
do { \
alarm_triggered = false; \
alarm(secs_per_test); \
@ -37,7 +37,7 @@ do { \
} while (0)
#else
/* WIN32 doesn't support alarm, so we create a thread and sleep there */
#define START_TIMER \
do { \
alarm_triggered = false; \
if (CreateThread(NULL, 0, process_alarm, NULL, 0, NULL) == \
@ -55,7 +55,7 @@ do { \
gettimeofday(&stop_t, NULL); \
print_elapse(start_t, stop_t, ops); \
} while (0)
static const char *progname;
@ -77,6 +77,7 @@ static void test_sync(int writes_per_op);
static void test_open_syncs(void);
static void test_open_sync(const char *msg, int writes_size);
static void test_file_descriptor_sync(void);
#ifndef WIN32
static void process_alarm(int sig);
#else

contrib/pg_test_timing/pg_test_timing.c

@ -1,7 +1,7 @@
/*
* pg_test_timing.c
* tests overhead of timing calls and their monotonicity: that
* they always move forward
*/
#include "postgres_fe.h"
@ -35,8 +35,8 @@ handle_args(int argc, char *argv[])
{"duration", required_argument, NULL, 'd'},
{NULL, 0, NULL, 0}
};
int option; /* Command line option */
int optindex = 0; /* used by getopt_long */
if (argc > 1)
{
@ -87,7 +87,7 @@ handle_args(int argc, char *argv[])
else
{
fprintf(stderr,
"%s: duration must be a positive integer (duration is \"%d\")\n",
progname, test_duration);
fprintf(stderr, "Try \"%s --help\" for more information.\n",
progname);
@ -98,16 +98,22 @@ handle_args(int argc, char *argv[])
static void
test_timing(int32 duration)
{
uint64 total_time;
int64 time_elapsed = 0;
uint64 loop_count = 0;
uint64 prev,
cur;
int32 diff,
i,
bits,
found;
instr_time start_time,
end_time,
temp;
static int64 histogram[32];
char buf[100];
total_time = duration > 0 ? duration * 1000000 : 0;
@ -146,7 +152,7 @@ test_timing(int32 duration)
INSTR_TIME_SUBTRACT(end_time, start_time);
printf("Per loop time including overhead: %0.2f nsec\n",
INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count);
printf("Histogram of timing durations:\n");
printf("%9s: %10s %9s\n", "< usec", "count", "percent");

contrib/pg_trgm/trgm_gist.c

@ -199,9 +199,9 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
* trigram extraction is relatively CPU-expensive. We must include
* strategy number because trigram extraction depends on strategy.
*
* The cached structure contains the strategy number, then the input query
* (starting at a MAXALIGN boundary), then the TRGM value (also starting
* at a MAXALIGN boundary).
*/
if (cache == NULL ||
strategy != *((StrategyNumber *) cache) ||
@ -341,8 +341,7 @@ gtrgm_distance(PG_FUNCTION_ARGS)
char *cache = (char *) fcinfo->flinfo->fn_extra;
/*
* Cache the generated trigrams across multiple calls with the same query.
*/
if (cache == NULL ||
VARSIZE(cache) != querysize ||

contrib/pg_upgrade/check.c

@ -168,7 +168,7 @@ issue_warnings(char *sequence_script_file_name)
SYSTEMQUOTE "\"%s/psql\" --echo-queries "
"--set ON_ERROR_STOP=on "
"--no-psqlrc --port %d --username \"%s\" "
"-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
new_cluster.bindir, new_cluster.port, os_info.user,
sequence_script_file_name, UTILITY_LOG_FILE);
unlink(sequence_script_file_name);
@ -204,7 +204,7 @@ output_completion_banner(char *analyze_script_file_name,
else
pg_log(PG_REPORT,
"Optimizer statistics and free space information are not transferred\n"
"by pg_upgrade so, once you start the new server, consider running:\n"
" %s\n\n", analyze_script_file_name);
pg_log(PG_REPORT,
@ -238,7 +238,8 @@ check_cluster_versions(void)
/*
* We can't allow downgrading because we use the target pg_dumpall, and
* pg_dumpall cannot operate on new database versions, only older
* versions.
*/
if (old_cluster.major_version > new_cluster.major_version)
pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
@ -402,31 +403,31 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %shave the default level of optimizer statistics.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sthis script and run:%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s vacuumdb --all %s%s\n", ECHO_QUOTE,
/* Did we copy the free space files? */
(GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
"--analyze-only" : "--analyze", ECHO_QUOTE);
fprintf(script, "echo\n\n");
#ifndef WIN32
@ -441,15 +442,15 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s--------------------------------------------------%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all --analyze-only\n");
fprintf(script, "echo\n");
fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
#ifndef WIN32
@ -462,9 +463,9 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s---------------------------------------------------%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all --analyze-only\n");
fprintf(script, "echo\n\n");
@ -475,17 +476,17 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s-------------------------------------------------------------%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all %s\n",
/* Did we copy the free space files? */
(GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
"--analyze-only" : "--analyze");
fprintf(script, "echo\n\n");
fprintf(script, "echo %sDone%s\n",
ECHO_QUOTE, ECHO_QUOTE);
fclose(script);
@ -716,8 +717,8 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"Your installation contains \"contrib/isn\" functions which rely on the\n"
"bigint data type. Your old and new clusters pass bigint values\n"
"differently so this cluster cannot currently be upgraded. You can\n"
"manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
"\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
"the problem functions is in the file:\n"
@ -764,9 +765,9 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
PGconn *conn = connectToServer(cluster, active_db->db_name);
/*
* While several relkinds don't store any data, e.g. views, they can
* be used to define data types of other columns, so we check all
* relkinds.
*/
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname "
@@ -777,16 +778,16 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
" NOT a.attisdropped AND "
" a.atttypid IN ( "
" 'pg_catalog.regproc'::pg_catalog.regtype, "
" 'pg_catalog.regprocedure'::pg_catalog.regtype, "
" 'pg_catalog.regoper'::pg_catalog.regtype, "
" 'pg_catalog.regoperator'::pg_catalog.regtype, "
/* regclass.oid is preserved, so 'regclass' is OK */
/* regtype.oid is preserved, so 'regtype' is OK */
" 'pg_catalog.regconfig'::pg_catalog.regtype, "
" 'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
" c.relnamespace = n.oid AND "
" n.nspname != 'pg_catalog' AND "
" n.nspname != 'information_schema'");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@ -822,8 +823,8 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"Your installation contains one of the reg* data types in user tables.\n"
"These data types reference system OIDs that are not preserved by\n"
"pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
"remove the problem tables and restart the upgrade. A list of the problem\n"
"columns is in the file:\n"
" %s\n\n", output_path);
@ -836,9 +837,11 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
static void
get_bin_version(ClusterInfo *cluster)
{
char cmd[MAXPGPATH],
cmd_output[MAX_STRING];
FILE *output;
int pre_dot,
post_dot;
snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir);
@ -858,4 +861,3 @@ get_bin_version(ClusterInfo *cluster)
cluster->bin_version = (pre_dot * 100 + post_dot) * 100;
}

contrib/pg_upgrade/controldata.c

@ -129,6 +129,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
pg_log(PG_VERBOSE, "%s", bufin);
#ifdef WIN32
/*
* Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
* work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a
@ -506,7 +507,7 @@ check_control_data(ControlData *oldctrl,
* This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
*/
pg_log(PG_FATAL,
"You will need to rebuild the new server with configure option\n"
"--disable-integer-datetimes or get server binaries built with those\n"
"options.\n");
}
@ -531,6 +532,6 @@ disable_old_cluster(void)
pg_log(PG_REPORT, "\n"
"If you want to start the old cluster, you will need to remove\n"
"the \".old\" suffix from %s/global/pg_control.old.\n"
"Because \"link\" mode was used, the old cluster cannot be safely\n"
"started once the new cluster has been started.\n\n", old_cluster.pgdata);
}

contrib/pg_upgrade/exec.c

@ -18,8 +18,9 @@
static void check_data_dir(const char *pg_data);
static void check_bin_dir(ClusterInfo *cluster);
static void validate_exec(const char *dir, const char *cmdName);
#ifdef WIN32
static int win32_check_directory_write_permissions(void);
#endif
@ -64,7 +65,7 @@ exec_prog(bool throw_error, bool is_priv,
pg_log(throw_error ? PG_FATAL : PG_REPORT,
"Consult the last few lines of \"%s\" for\n"
"the probable cause of the failure.\n",
log_file);
return 1;
}
@ -142,12 +143,12 @@ verify_directories(void)
static int
win32_check_directory_write_permissions(void)
{
int fd;
/*
* We open a file we would normally create anyway. We do this even in
* 'check' mode, which isn't ideal, but this is the best we can do.
*/
if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
return -1;
close(fd);
@ -184,7 +185,7 @@ check_data_dir(const char *pg_data)
struct stat statBuf;
snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data,
/* Win32 can't stat() a directory with a trailing slash. */
*requiredSubdirs[subdirnum] ? "/" : "",
requiredSubdirs[subdirnum]);

contrib/pg_upgrade/file.c

@ -233,7 +233,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
* large number of times.
*/
int
load_directory(const char *dirname, struct dirent *** namelist)
{
DIR *dirdesc;
struct dirent *direntry;
@ -251,7 +251,7 @@ load_directory(const char *dirname, struct dirent ***namelist)
count++;
*namelist = (struct dirent **) realloc((void *) (*namelist),
(size_t) ((name_num + 1) * sizeof(struct dirent *)));
if (*namelist == NULL)
{
@ -314,7 +314,6 @@ win32_pghardlink(const char *src, const char *dst)
else
return 0;
}
#endif
@ -322,13 +321,11 @@ win32_pghardlink(const char *src, const char *dst)
FILE *
fopen_priv(const char *path, const char *mode)
{
mode_t old_umask = umask(S_IRWXG | S_IRWXO);
FILE *fp;
fp = fopen(path, mode);
umask(old_umask);
return fp;
}

contrib/pg_upgrade/function.c

@ -133,7 +133,7 @@ get_loadable_libraries(void)
int totaltups;
int dbnum;
bool found_public_plpython_handler = false;
ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *));
totaltups = 0;
@ -144,10 +144,10 @@ get_loadable_libraries(void)
PGconn *conn = connectToServer(&old_cluster, active_db->db_name);
/*
* Fetch all libraries referenced in this DB. We can't exclude the
* "pg_catalog" schema because, while such functions are not
* explicitly dumped by pg_dump, they do reference implicit objects
* that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
*/
ress[dbnum] = executeQueryOrDie(conn,
"SELECT DISTINCT probin "
@ -158,26 +158,26 @@ get_loadable_libraries(void)
FirstNormalObjectId);
totaltups += PQntuples(ress[dbnum]);
/*
* Systems that install plpython before 8.1 have
* plpython_call_handler() defined in the "public" schema, causing
* pg_dumpall to dump it. However that function still references
* "plpython" (no "2"), so it throws an error on restore. This code
* checks for the problem function, reports affected databases to the
* user and explains how to remove them. 8.1 git commit:
* e0dedd0559f005d60c69c9772163e69c204bac69
* http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
* http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901)
{
PGresult *res;
res = executeQueryOrDie(conn,
"SELECT 1 "
"FROM pg_catalog.pg_proc JOIN pg_namespace "
" ON pronamespace = pg_namespace.oid "
"WHERE proname = 'plpython_call_handler' AND "
"nspname = 'public' AND "
"prolang = 13 /* C */ AND "
"probin = '$libdir/plpython' AND "
@ -188,23 +188,23 @@ get_loadable_libraries(void)
if (!found_public_plpython_handler)
{
pg_log(PG_WARNING,
"\nThe old cluster has a \"plpython_call_handler\" function defined\n"
"in the \"public\" schema which is a duplicate of the one defined\n"
"in the \"pg_catalog\" schema. You can confirm this by executing\n"
"in psql:\n"
"\n"
" \\df *.plpython_call_handler\n"
"\n"
"The \"public\" schema version of this function was created by a\n"
"pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
"to complete because it references a now-obsolete \"plpython\"\n"
"shared object file. You can remove the \"public\" schema version\n"
"of this function by running the following command:\n"
"\n"
" DROP FUNCTION public.plpython_call_handler()\n"
"\n"
"in each affected database:\n"
"\n");
}
pg_log(PG_WARNING, " %s\n", active_db->db_name);
found_public_plpython_handler = true;
@ -217,9 +217,9 @@ get_loadable_libraries(void)
if (found_public_plpython_handler)
pg_log(PG_FATAL,
"Remove the problem functions from the old cluster to continue.\n");
totaltups++; /* reserve for pg_upgrade_support */
/* Allocate what's certainly enough space */
os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *));
@ -293,17 +293,17 @@ check_loadable_libraries(void)
PGresult *res;
/*
* In Postgres 9.0, Python 3 support was added, and to do that, a
* plpython2u language was created with library name plpython2.so as a
* symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and plpython2u
* pointing to it. For this reason, any reference to library name
* "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
* the new cluster.
*
* For this case, we could check pg_pltemplate, but that only works
* for languages, and does not help with function shared objects, so
* we just do a general fix.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
strcmp(lib, "$libdir/plpython") == 0)
@ -325,7 +325,7 @@ check_loadable_libraries(void)
/* exit and report missing support library with special message */
if (strcmp(lib, PG_UPGRADE_SUPPORT) == 0)
pg_log(PG_FATAL,
"The pg_upgrade_support module must be created and installed in the new cluster.\n");
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",

contrib/pg_upgrade/info.c

@ -57,12 +57,12 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
old_db->db_name, old_rel->reloid, new_rel->reloid);
/*
* TOAST table names initially match the heap pg_class oid. In
* pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST
* table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
* 9.0, TOAST relation names always use heap table oids, hence we
* cannot check relation names when upgrading from pre-9.0. Clusters
* upgraded to 9.0 will get matching TOAST names.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
@ -194,16 +194,16 @@ get_db_infos(ClusterInfo *cluster)
char query[QUERY_ALLOC];
snprintf(query, sizeof(query),
"SELECT d.oid, d.datname, %s "
"FROM pg_catalog.pg_database d "
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON d.dattablespace = t.oid "
"WHERE d.datallowconn = true "
/* we don't preserve pg_database.oid so we sort by name */
"ORDER BY 2",
/* 9.2 removed the spclocation column */
(GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
"t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
res = executeQueryOrDie(conn, "%s", query);
@ -276,7 +276,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON c.reltablespace = t.oid "
"WHERE relkind IN ('r','t', 'i'%s) AND "
/* exclude possible orphaned temp tables */
" ((n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "

contrib/pg_upgrade/option.c

@ -56,10 +56,10 @@ parseCommandLine(int argc, char *argv[])
int option; /* Command line option */
int optindex = 0; /* used by getopt_long */
int os_user_effective_id;
FILE *fp;
char **filename;
time_t run_time = time(NULL);
user_opts.transfer_mode = TRANSFER_MODE_COPY;
os_info.progname = get_progname(argv[0]);
@ -138,11 +138,11 @@ parseCommandLine(int argc, char *argv[])
new_cluster.pgopts = pg_strdup(optarg);
break;
/*
* Someday, the port number option could be removed and passed
* using -o/-O, but that requires postmaster -C to be
* supported on all old/new versions.
*/
case 'p':
if ((old_cluster.port = atoi(optarg)) <= 0)
{
@ -196,21 +196,21 @@ parseCommandLine(int argc, char *argv[])
/* Start with newline because we might be appending to a file. */
fprintf(fp, "\n"
"-----------------------------------------------------------------\n"
" pg_upgrade run on %s"
"-----------------------------------------------------------------\n\n",
ctime(&run_time));
fclose(fp);
}
/* Get values from env if not already set */
check_required_directory(&old_cluster.bindir, "PGBINOLD", "-b",
"old cluster binaries reside");
check_required_directory(&new_cluster.bindir, "PGBINNEW", "-B",
"new cluster binaries reside");
check_required_directory(&old_cluster.pgdata, "PGDATAOLD", "-d",
"old cluster data resides");
check_required_directory(&new_cluster.pgdata, "PGDATANEW", "-D",
"new cluster data resides");
}
@ -285,7 +285,7 @@ or\n"), old_cluster.port, new_cluster.port, os_info.user);
*/
static void
check_required_directory(char **dirpath, char *envVarName,
char *cmdLineOption, char *description)
{
if (*dirpath == NULL || strlen(*dirpath) == 0)
{
@ -322,8 +322,10 @@ void
adjust_data_dir(ClusterInfo *cluster)
{
char filename[MAXPGPATH];
char cmd[MAXPGPATH],
cmd_output[MAX_STRING];
FILE *fp,
*output;
/* If there is no postgresql.conf, it can't be a config-only dir */
snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig);
@ -345,10 +347,9 @@ adjust_data_dir(ClusterInfo *cluster)
CLUSTER_NAME(cluster));
/*
* We don't have a data directory yet, so we can't check the PG version,
* so this might fail --- only works for PG 9.2+. If this fails,
* pg_upgrade will fail anyway because the data files will not be found.
*/
snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
cluster->bindir, cluster->pgconfig);
@ -356,7 +357,7 @@ adjust_data_dir(ClusterInfo *cluster)
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
pg_log(PG_FATAL, "Could not get data directory using %s: %s\n",
cmd, getErrorText(errno));
pclose(output);

contrib/pg_upgrade/pg_upgrade.c

@ -55,7 +55,7 @@ ClusterInfo old_cluster,
new_cluster;
OSInfo os_info;
char *output_files[] = {
SERVER_LOG_FILE,
#ifdef WIN32
/* unique file for pg_ctl start */
@ -122,11 +122,10 @@ main(int argc, char **argv)
stop_postmaster(false);
/*
* Most failures happen in create_new_objects(), which has completed at
* this point. We do this here because it is just before linking, which
* will link the old and new cluster data files, preventing the old
* cluster from being safely started once the new cluster is started.
*/
if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
disable_old_cluster();
@ -215,8 +214,8 @@ prepare_new_cluster(void)
exec_prog(true, true, UTILITY_LOG_FILE,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --analyze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
new_cluster.bindir, new_cluster.port, os_info.user,
log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
check_ok();
/*
@ -229,8 +228,8 @@ prepare_new_cluster(void)
exec_prog(true, true, UTILITY_LOG_FILE,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --freeze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
new_cluster.bindir, new_cluster.port, os_info.user,
log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
check_ok();
get_pg_database_relfilenode(&new_cluster);
@ -252,8 +251,8 @@ prepare_new_databases(void)
/*
* Install support functions in the global-object restore database to
* preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
* database so objects we add into 'template1' are not propogated. They
* are removed on pg_upgrade exit.
*/
install_support_functions_in_new_db("template1");
@ -267,7 +266,7 @@ prepare_new_databases(void)
exec_prog(true, true, RESTORE_LOG_FILE,
SYSTEMQUOTE "\"%s/psql\" --echo-queries "
"--set ON_ERROR_STOP=on "
/* --no-psqlrc prevents AUTOCOMMIT=off */
"--no-psqlrc --port %d --username \"%s\" "
"-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
new_cluster.bindir, new_cluster.port, os_info.user,
@ -453,13 +452,13 @@ set_frozenxids(void)
static void
cleanup(void)
{
fclose(log_opts.internal);
/* Remove dump and log files? */
if (!log_opts.retain)
{
char **filename;
for (filename = output_files; *filename != NULL; filename++)
unlink(*filename);

contrib/pg_upgrade/pg_upgrade.h

@ -75,7 +75,7 @@ extern char *output_files[];
#define RM_CMD "rm -f"
#define RMDIR_CMD "rm -rf"
#define SCRIPT_EXT "sh"
#define ECHO_QUOTE "'"
#else
#define pg_copy_file CopyFile
#define pg_mv_file pgrename
@ -85,7 +85,7 @@ extern char *output_files[];
#define RMDIR_CMD "RMDIR /s/q"
#define SCRIPT_EXT "bat"
#define EXE_EXT ".exe"
#define ECHO_QUOTE ""
#endif
#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \
@ -98,7 +98,7 @@ extern char *output_files[];
/* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */
#define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251
/*
* Visibility map changed with this 9.2 commit,
* 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version.
*/
#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031
@ -114,7 +114,7 @@ typedef struct
Oid reloid; /* relation oid */
Oid relfilenode; /* relation relfile node */
/* relation tablespace path, or "" for the cluster default */
char tablespace[MAXPGPATH];
} RelInfo;
typedef struct
@ -222,9 +222,11 @@ typedef struct
ControlData controldata; /* pg_control information */
DbInfoArr dbarr; /* dbinfos array */
char *pgdata; /* pathname for cluster's $PGDATA directory */
char *pgconfig; /* pathname for cluster's config file
* directory */
char *bindir; /* pathname for cluster's executable directory */
char *pgopts; /* options to pass to the server, like pg_ctl
* -o */
unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */
char major_version_str[64]; /* string PG_VERSION of cluster */
@ -291,8 +293,8 @@ void check_old_cluster(bool live_check,
void check_new_cluster(void);
void report_clusters_compatible(void);
void issue_warnings(char *sequence_script_file_name);
void output_completion_banner(char *analyze_script_file_name,
char *deletion_script_file_name);
void check_cluster_versions(void);
void check_cluster_compatibility(bool live_check);
void create_script_for_old_cluster_deletion(char **deletion_script_file_name);
@ -314,9 +316,10 @@ void split_old_dump(void);
/* exec.c */
int
exec_prog(bool throw_error, bool is_priv,
const char *log_file, const char *cmd,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
void verify_directories(void);
bool is_server_running(const char *datadir);
@ -353,14 +356,14 @@ const char *setupPageConverter(pageCnvCtx **result);
typedef void *pageCnvCtx;
#endif
int load_directory(const char *dirname, struct dirent *** namelist);
const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst);
void check_hard_link(void);
FILE *fopen_priv(const char *path, const char *mode);
/* function.c */
@ -399,8 +402,9 @@ void init_tablespaces(void);
/* server.c */
PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
PGresult *
executeQueryOrDie(PGconn *conn, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void start_postmaster(ClusterInfo *cluster);
void stop_postmaster(bool fast);
@ -413,12 +417,15 @@ void check_pghost_envvar(void);
char *quote_identifier(const char *s);
int get_user_info(char **user_name);
void check_ok(void);
void
report_status(eLogType type, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
pg_log(eLogType type, char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
prep_status(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void check_ok(void);
char *pg_strdup(const char *s);
void *pg_malloc(int size);

contrib/pg_upgrade/relfilenode.c

@ -34,26 +34,28 @@ const char *
transfer_all_new_dbs(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
{
int old_dbnum,
new_dbnum;
const char *msg = NULL;
prep_status("%s user relation files\n",
user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
/* Scan the old cluster databases and transfer their files */
for (old_dbnum = new_dbnum = 0;
old_dbnum < old_db_arr->ndbs;
old_dbnum++, new_dbnum++)
{
DbInfo *old_db = &old_db_arr->dbs[old_dbnum],
*new_db = NULL;
FileNameMap *mappings;
int n_maps;
pageCnvCtx *pageConverter = NULL;
/*
* Advance past any databases that exist in the new cluster but not in
* the old, e.g. "postgres". (The user might have removed the
* 'postgres' database from the old cluster.)
*/
for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
{
@ -83,8 +85,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
}
}
prep_status(" "); /* in case nothing printed; pass a space so
* gcc doesn't complain about empty format
* string */
check_ok();
@ -137,14 +139,14 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
int mapnum;
int fileno;
bool vm_crashsafe_change = false;
old_dir[0] = '\0';
/* Do not copy non-crashsafe vm files for binaries that assume crashsafety */
if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
vm_crashsafe_change = true;
for (mapnum = 0; mapnum < size; mapnum++)
{
char old_file[MAXPGPATH];
@ -190,8 +192,8 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
for (fileno = 0; fileno < numFiles; fileno++)
{
char *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
bool is_vm_file = false;
/* Is a visibility map file? (name ends with _vm) */
if (vm_offset && strlen(vm_offset) == strlen("_vm"))

contrib/pg_upgrade/server.c

@ -161,7 +161,7 @@ start_postmaster(ClusterInfo *cluster)
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" "
"-o \"-p %d %s %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
(cluster->controldata.cat_ver >=
BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
"-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
@ -172,11 +172,11 @@ start_postmaster(ClusterInfo *cluster)
* it might supply a reason for the failure.
*/
pg_ctl_return = exec_prog(false, true,
/* pass both file names if the differ */
(strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
SERVER_LOG_FILE :
SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
"%s", cmd);
/* Check to see if we can connect to the server; if not, report it. */
if ((conn = get_db_conn(cluster, "template1")) == NULL ||
@ -211,14 +211,14 @@ stop_postmaster(bool fast)
else if (os_info.running_cluster == &new_cluster)
cluster = &new_cluster;
else
return; /* no cluster running */
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" "
"%s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
cluster->bindir, cluster->pgconfig,
cluster->pgopts ? cluster->pgopts : "",
fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
exec_prog(fast ? false : true, true, SERVER_STOP_LOG_FILE, "%s", cmd);

contrib/pg_upgrade/tablespace.c

@ -52,8 +52,8 @@ get_tablespace_paths(void)
"WHERE spcname != 'pg_default' AND "
" spcname != 'pg_global'",
/* 9.2 removed the spclocation column */
(GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
"spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
res = executeQueryOrDie(conn, "%s", query);


@ -60,10 +60,10 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
" NOT a.attisdropped AND "
" a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@ -98,9 +98,9 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"Your installation contains the \"name\" data type in user tables. This\n"
"data type changed its internal alignment between your old and new\n"
"data type changed its internal alignment between your old and new\n"
"clusters so this cluster cannot currently be upgraded. You can remove\n"
"the problem tables and restart the upgrade. A list of the problem\n"
"the problem tables and restart the upgrade. A list of the problem\n"
"columns is in the file:\n"
" %s\n\n", output_path);
}
@ -150,10 +150,10 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
" NOT a.attisdropped AND "
" a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@ -189,7 +189,7 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
pg_log(PG_FATAL,
"Your installation contains the \"tsquery\" data type. This data type\n"
"added a new internal field between your old and new clusters so this\n"
"cluster cannot currently be upgraded. You can remove the problem\n"
"cluster cannot currently be upgraded. You can remove the problem\n"
"columns and restart the upgrade. A list of the problem columns is in the\n"
"file:\n"
" %s\n\n", output_path);
@ -328,10 +328,10 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
" NOT a.attisdropped AND "
" a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
/*
* This macro is used below to avoid reindexing indexes already rebuilt
@ -527,7 +527,7 @@ old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
"must be reindexed with the REINDEX command. The file:\n"
" %s\n"
"when executed by psql by the database superuser will recreate all invalid\n"
"indexes; until then, none of these indexes will be used.\n\n",
"indexes; until then, none of these indexes will be used.\n\n",
output_path);
}
else
@ -648,10 +648,10 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
pg_log(PG_WARNING, "\n"
"Your installation contains indexes using \"bpchar_pattern_ops\". These\n"
"indexes have different internal formats between your old and new clusters\n"
"so they must be reindexed with the REINDEX command. The file:\n"
"so they must be reindexed with the REINDEX command. The file:\n"
" %s\n"
"when executed by psql by the database superuser will recreate all invalid\n"
"indexes; until then, none of these indexes will be used.\n\n",
"indexes; until then, none of these indexes will be used.\n\n",
output_path);
}
else
@ -699,10 +699,10 @@ old_8_3_create_sequence_script(ClusterInfo *cluster)
" pg_catalog.pg_namespace n "
"WHERE c.relkind = 'S' AND "
" c.relnamespace = n.oid AND "
/* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");


@ -66,7 +66,7 @@
typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t;
static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#elif defined(ENABLE_THREAD_SAFETY)
/* Use platform-dependent pthread capability */
@ -84,7 +84,7 @@ static int pthread_join(pthread_t th, void **thread_return);
typedef struct fork_pthread *pthread_t;
typedef int pthread_attr_t;
static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#endif
@ -198,7 +198,7 @@ typedef struct
instr_time start_time; /* thread start time */
instr_time *exec_elapsed; /* time spent executing cmds (per Command) */
int *exec_count; /* number of cmd executions (per Command) */
unsigned short random_state[3]; /* separate randomness for each thread */
} TState;
#define INVALID_THREAD ((pthread_t) 0)
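The random_state member gives every thread a private erand48() state, so no locking of a shared generator is needed. A minimal standalone illustration (not pgbench code; erand48() keeps all of its state in the caller-supplied array):

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	unsigned short random_state[3] = {12, 34, 56};	/* per-thread seed */

	printf("%f\n", erand48(random_state));	/* uniform double in [0, 1) */
	return 0;
}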
@ -1075,7 +1075,7 @@ top:
/*
* getrand() needs to be able to subtract max from min and add
* one to the result without overflowing. Since we know max > min,
* we can detect overflow just by checking for a negative result.
* But we must check both that the subtraction doesn't overflow,
* and that adding one to the result doesn't overflow either.
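The guard described above can be sketched as follows (an assumed helper, not pgbench's code; the negative-result trick relies on the wrapping behavior of two's-complement arithmetic, just as the comment itself does):

#include <stdbool.h>
#include <stdint.h>

/* True if max - min + 1 is representable, given that max > min. */
static bool
range_is_safe(int64_t min, int64_t max)
{
	int64_t		span = max - min;	/* goes negative if the subtraction wraps */

	return span >= 0 && span + 1 >= 0;	/* also catch the wrap from adding one */
}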
@ -1267,10 +1267,11 @@ init(void)
* versions. Since pgbench has never pretended to be fully TPC-B
* compliant anyway, we stick with the historical behavior.
*/
struct ddlinfo
{
char *table;
char *cols;
int declare_fillfactor;
};
struct ddlinfo DDLs[] = {
{
@ -1321,15 +1322,16 @@ init(void)
/* Construct new create table statement. */
opts[0] = '\0';
if (ddl->declare_fillfactor)
snprintf(opts + strlen(opts), 256 - strlen(opts),
" with (fillfactor=%d)", fillfactor);
if (tablespace != NULL)
{
char *escape_tablespace;
escape_tablespace = PQescapeIdentifier(con, tablespace,
strlen(tablespace));
snprintf(opts + strlen(opts), 256 - strlen(opts),
" tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
snprintf(buffer, 256, "create%s table %s(%s)%s",
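The snprintf(opts + strlen(opts), 256 - strlen(opts), ...) calls above follow a common append idiom: each call writes at the current end of the string and passes only the remaining capacity, so repeated appends cannot overrun the buffer. In isolation (illustrative values only):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char		opts[256] = "";

	snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts),
			 " with (fillfactor=%d)", 90);
	snprintf(opts + strlen(opts), sizeof(opts) - strlen(opts),
			 " tablespace %s", "my_tablespace");
	puts(opts);		/* " with (fillfactor=90) tablespace my_tablespace" */
	return 0;
}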
@ -1404,17 +1406,18 @@ init(void)
fprintf(stderr, "set primary key...\n");
for (i = 0; i < lengthof(DDLAFTERs); i++)
{
char buffer[256];
strncpy(buffer, DDLAFTERs[i], 256);
if (index_tablespace != NULL)
{
char *escape_tablespace;
escape_tablespace = PQescapeIdentifier(con, index_tablespace,
strlen(index_tablespace));
snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
" using index tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
@ -1861,10 +1864,10 @@ main(int argc, char **argv)
int i;
static struct option long_options[] = {
{"index-tablespace", required_argument, NULL, 3},
{"tablespace", required_argument, NULL, 2},
{"unlogged-tables", no_argument, &unlogged_tables, 1},
{NULL, 0, NULL, 0}
{"index-tablespace", required_argument, NULL, 3},
{"tablespace", required_argument, NULL, 2},
{"unlogged-tables", no_argument, &unlogged_tables, 1},
{NULL, 0, NULL, 0}
};
#ifdef HAVE_GETRLIMIT
@ -2065,10 +2068,10 @@ main(int argc, char **argv)
case 0:
/* This covers long options which take no argument. */
break;
case 2: /* tablespace */
tablespace = optarg;
break;
case 3: /* index-tablespace */
index_tablespace = optarg;
break;
default:
@ -2571,7 +2574,7 @@ typedef struct fork_pthread
static int
pthread_create(pthread_t *thread,
pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{
@ -2687,7 +2690,7 @@ win32_pthread_run(void *arg)
static int
pthread_create(pthread_t *thread,
pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{


@ -34,8 +34,8 @@ char *
px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
{
static char *magic = "$1$"; /* This string is magic for this algorithm.
* Having it this way, we can get better later
* on */
static char *p;
static const char *sp,
*ep;


@ -204,8 +204,9 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name);
void px_set_debug_handler(void (*handler) (const char *));
#ifdef PX_DEBUG
void
px_debug(const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
#else
#define px_debug(...)
#endif


@ -95,7 +95,7 @@ pgstatindex(PG_FUNCTION_ARGS)
BlockNumber nblocks;
BlockNumber blkno;
BTIndexStat indexStat;
BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
if (!superuser())
ereport(ERROR,
@ -160,7 +160,7 @@ pgstatindex(PG_FUNCTION_ARGS)
CHECK_FOR_INTERRUPTS();
/* Read and lock buffer */
buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
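Taken together, these fragments follow the standard strategy-scan pattern; condensed into a sketch (assuming an already-opened Relation rel and its block count nblocks), GetAccessStrategy(BAS_BULKREAD) allocates a small ring of buffers so the full scan recycles its own pages instead of evicting the rest of shared_buffers:

	BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
	BlockNumber blkno;

	for (blkno = 0; blkno < nblocks; blkno++)
	{
		Buffer		buffer;

		CHECK_FOR_INTERRUPTS();
		buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
									bstrategy);
		LockBuffer(buffer, BUFFER_LOCK_SHARE);
		/* ... examine BufferGetPage(buffer) ... */
		UnlockReleaseBuffer(buffer);
	}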


@ -62,7 +62,7 @@ typedef struct pgstattuple_type
} pgstattuple_type;
typedef void (*pgstat_page) (pgstattuple_type *, Relation, BlockNumber,
BufferAccessStrategy);
static Datum build_pgstattuple_type(pgstattuple_type *stat,
FunctionCallInfo fcinfo);


@ -32,19 +32,19 @@ void
sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
{
Relation rel;
ScanKeyData skey;
SysScanDesc sscan;
HeapTuple tuple;
char *tcontext;
char *ncontext;
char audit_name[NAMEDATALEN + 20];
ObjectAddress object;
Form_pg_database datForm;
/*
* The OID of the source database is not saved in the pg_database
* catalog, so we collect its identifier using contextual information.
* If NULL, its default is "template1" according to createdb().
*/
if (!dtemplate)
dtemplate = "template1";
@ -56,6 +56,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
tcontext = sepgsql_get_label(object.classId,
object.objectId,
object.objectSubId);
/*
* check db_database:{getattr} permission
*/
@ -67,11 +68,11 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
true);
/*
* Compute a default security label for the newly created database based
* on the security labels of the client and the source database.
*
* XXX - an upcoming version of libselinux will support taking the
* object name to handle special treatment of default security labels.
*/
rel = heap_open(DatabaseRelationId, AccessShareLock);
@ -91,6 +92,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
SEPG_CLASS_DB_DATABASE);
/*
* check db_database:{create} permission
*/
@ -126,8 +128,8 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
void
sepgsql_database_drop(Oid databaseId)
{
ObjectAddress object;
char *audit_name;
/*
* check db_database:{drop} permission
@ -153,8 +155,8 @@ sepgsql_database_drop(Oid databaseId)
void
sepgsql_database_relabel(Oid databaseId, const char *seclabel)
{
ObjectAddress object;
char *audit_name;
object.classId = DatabaseRelationId;
object.objectId = databaseId;
@ -170,6 +172,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel)
SEPG_DB_DATABASE__RELABELFROM,
audit_name,
true);
/*
* check db_database:{relabelto} permission
*/


@ -150,7 +150,7 @@ check_relation_privileges(Oid relOid,
uint32 required,
bool abort)
{
ObjectAddress object;
char *audit_name;
Bitmapset *columns;
int index;


@ -52,9 +52,9 @@ typedef struct
* command. Elsewhere (including the case of default) NULL.
*/
const char *createdb_dtemplate;
} sepgsql_context_info_t;
static sepgsql_context_info_t sepgsql_context_info;
/*
* GUC: sepgsql.permissive = (on|off)
@ -101,7 +101,7 @@ sepgsql_object_access(ObjectAccessType access,
{
case DatabaseRelationId:
sepgsql_database_post_create(objectId,
sepgsql_context_info.createdb_dtemplate);
break;
case NamespaceRelationId:
@ -115,9 +115,8 @@ sepgsql_object_access(ObjectAccessType access,
* All cases we want to apply permission checks on
* creation of a new relation are invocation of the
* heap_create_with_catalog via DefineRelation or
* OpenIntoRel. Elsewhere, we need neither assignment
* of security label nor permission checks.
*/
switch (sepgsql_context_info.cmdtype)
{
@ -150,12 +149,12 @@ sepgsql_object_access(ObjectAccessType access,
case OAT_DROP:
{
ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
/*
* No need to apply permission checks on object deletion due
* to internal cleanups, such as removal of temporary database
* objects at session close.
*/
if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0)
break;
@ -219,13 +218,13 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort)
/*
* sepgsql_executor_start
*
* It saves contextual information during ExecutorStart to distinguish
* a case with/without permission checks later.
*/
static void
sepgsql_executor_start(QueryDesc *queryDesc, int eflags)
{
sepgsql_context_info_t saved_context_info = sepgsql_context_info;
PG_TRY();
{
@ -270,28 +269,29 @@ sepgsql_utility_command(Node *parsetree,
DestReceiver *dest,
char *completionTag)
{
sepgsql_context_info_t saved_context_info = sepgsql_context_info;
ListCell *cell;
PG_TRY();
{
/*
* Check command tag to avoid nefarious operations, and save the
* current contextual information to determine whether we should apply
* permission checks here, or not.
*/
sepgsql_context_info.cmdtype = nodeTag(parsetree);
switch (nodeTag(parsetree))
{
case T_CreatedbStmt:
/*
* We want to reference the name of the source database, but it
* does not appear in the system catalog, so we save it here.
*/
foreach(cell, ((CreatedbStmt *) parsetree)->options)
{
DefElem *defel = (DefElem *) lfirst(cell);
if (strcmp(defel->defname, "template") == 0)
{
@ -303,6 +303,7 @@ sepgsql_utility_command(Node *parsetree,
break;
case T_LoadStmt:
/*
* We reject the LOAD command across the board in enforcing mode,
* because a binary module can arbitrarily override hooks.
@ -315,6 +316,7 @@ sepgsql_utility_command(Node *parsetree,
}
break;
default:
/*
* Right now we don't check any other utility commands,
* because it needs more detailed information to make access


@ -58,17 +58,18 @@ static fmgr_hook_type next_fmgr_hook = NULL;
* we use the list client_label_pending of pending_label to keep track of which
* labels were set during the (sub-)transactions.
*/
static char *client_label_peer = NULL; /* set by getpeercon(3) */
static List *client_label_pending = NIL; /* pending list being set by
* sepgsql_setcon() */
static char *client_label_committed = NULL; /* set by sepgsql_setcon(),
* and already committed */
static char *client_label_func = NULL; /* set by trusted procedure */
typedef struct
{
SubTransactionId subid;
char *label;
} pending_label;
/*
* sepgsql_get_client_label
@ -87,7 +88,7 @@ sepgsql_get_client_label(void)
/* uncommitted sepgsql_setcon() value */
if (client_label_pending)
{
pending_label *plabel = llast(client_label_pending);
if (plabel->label)
return plabel->label;
@ -104,16 +105,16 @@ sepgsql_get_client_label(void)
* sepgsql_set_client_label
*
* This routine tries to switch the current security label of the client, and
* checks related permissions. The supplied new label shall be added to the
* client_label_pending list, then saved at transaction-commit time to ensure
* transaction-awareness.
*/
static void
sepgsql_set_client_label(const char *new_label)
{
const char *tcontext;
MemoryContext oldcxt;
pending_label *plabel;
/* Reset to the initial client label, if NULL */
if (!new_label)
@ -140,9 +141,10 @@ sepgsql_set_client_label(const char *new_label)
SEPG_PROCESS__DYNTRANSITION,
NULL,
true);
/*
* Append the supplied new_label on the pending list until the current
* transaction is committed.
*/
oldcxt = MemoryContextSwitchTo(CurTransactionContext);
@ -158,7 +160,7 @@ sepgsql_set_client_label(const char *new_label)
/*
* sepgsql_xact_callback
*
* A callback routine of transaction commit/abort/prepare. Commit or abort
* changes in the client_label_pending list.
*/
static void
@ -168,8 +170,8 @@ sepgsql_xact_callback(XactEvent event, void *arg)
{
if (client_label_pending != NIL)
{
pending_label *plabel = llast(client_label_pending);
char *new_label;
if (plabel->label)
new_label = MemoryContextStrdup(TopMemoryContext,
@ -181,10 +183,11 @@ sepgsql_xact_callback(XactEvent event, void *arg)
pfree(client_label_committed);
client_label_committed = new_label;
/*
* XXX - Note that items of client_label_pending are allocated on
* CurTransactionContext, thus all acquired memory regions shall
* be released implicitly.
*/
client_label_pending = NIL;
}
@ -212,7 +215,8 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
prev = NULL;
for (cell = list_head(client_label_pending); cell; cell = next)
{
pending_label *plabel = lfirst(cell);
next = lnext(cell);
if (plabel->subid == mySubid)
@ -272,7 +276,7 @@ sepgsql_client_auth(Port *port, int status)
static bool
sepgsql_needs_fmgr_hook(Oid functionId)
{
ObjectAddress object;
if (next_needs_fmgr_hook &&
(*next_needs_fmgr_hook) (functionId))
@ -340,8 +344,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
/*
* process:transition permission between old and new label,
* when the user tries to switch the security label of the client
* on execution of a trusted procedure.
*/
if (stack->new_label)
sepgsql_avc_check_perms_label(stack->new_label,


@ -42,9 +42,9 @@ sepgsql_proc_post_create(Oid functionId)
char *tcontext;
char *ncontext;
int i;
StringInfoData audit_name;
ObjectAddress object;
Form_pg_proc proForm;
/*
* Fetch namespace of the new procedure. Because pg_proc entry is not
@ -77,6 +77,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object),
true);
/*
* XXX - db_language:{implement} also should be checked here
*/
@ -97,9 +98,10 @@ sepgsql_proc_post_create(Oid functionId)
*/
initStringInfo(&audit_name);
appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
for (i = 0; i < proForm->pronargs; i++)
{
Oid typeoid = proForm->proargtypes.values[i];
if (i > 0)
appendStringInfoChar(&audit_name, ',');
appendStringInfoString(&audit_name, format_type_be(typeoid));
@ -111,6 +113,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_PROCEDURE__CREATE,
audit_name.data,
true);
/*
* Assign the default security label on a new procedure
*/
@ -138,8 +141,8 @@ sepgsql_proc_post_create(Oid functionId)
void
sepgsql_proc_drop(Oid functionId)
{
ObjectAddress object;
char *audit_name;
/*
* check db_schema:{remove_name} permission
@ -156,19 +159,19 @@ sepgsql_proc_drop(Oid functionId)
true);
pfree(audit_name);
/*
* check db_procedure:{drop} permission
*/
object.classId = ProcedureRelationId;
object.objectId = functionId;
object.objectSubId = 0;
audit_name = getObjectDescription(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_PROCEDURE,
SEPG_DB_PROCEDURE__DROP,
audit_name,
true);
pfree(audit_name);
}
@ -181,8 +184,8 @@ sepgsql_proc_drop(Oid functionId)
void
sepgsql_proc_relabel(Oid functionId, const char *seclabel)
{
ObjectAddress object;
char *audit_name;
object.classId = ProcedureRelationId;
object.objectId = functionId;
@ -198,6 +201,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
SEPG_DB_PROCEDURE__RELABELFROM,
audit_name,
true);
/*
* check db_procedure:{relabelto} permission
*/


@ -44,9 +44,9 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
char *scontext;
char *tcontext;
char *ncontext;
char audit_name[2 * NAMEDATALEN + 20];
ObjectAddress object;
Form_pg_attribute attForm;
/*
* Only attributes within regular relation have individual security
@ -84,6 +84,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_COLUMN);
/*
* check db_column:{create} permission
*/
@ -118,8 +119,8 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
void
sepgsql_attribute_drop(Oid relOid, AttrNumber attnum)
{
ObjectAddress object;
char *audit_name;
if (get_rel_relkind(relOid) != RELKIND_RELATION)
return;
@ -151,7 +152,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
const char *seclabel)
{
ObjectAddress object;
char *audit_name;
if (get_rel_relkind(relOid) != RELKIND_RELATION)
ereport(ERROR,
@ -172,6 +173,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
SEPG_DB_COLUMN__RELABELFROM,
audit_name,
true);
/*
* check db_column:{relabelto} permission
*/
@ -203,7 +205,7 @@ sepgsql_relation_post_create(Oid relOid)
char *tcontext; /* schema */
char *rcontext; /* relation */
char *ccontext; /* column */
char audit_name[2 * NAMEDATALEN + 20];
/*
* Fetch catalog record of the new relation. Because pg_class entry is not
@ -254,6 +256,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object),
true);
/*
* Compute a default security label when we create a new relation object
* under the specified namespace.
@ -273,6 +276,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_DATABASE__CREATE,
audit_name,
true);
/*
* Assign the default security label on the new relation
*/
@ -288,10 +292,10 @@ sepgsql_relation_post_create(Oid relOid)
if (classForm->relkind == RELKIND_RELATION)
{
Relation arel;
ScanKeyData akey;
SysScanDesc ascan;
HeapTuple atup;
Form_pg_attribute attForm;
arel = heap_open(AttributeRelationId, AccessShareLock);
@ -315,6 +319,7 @@ sepgsql_relation_post_create(Oid relOid)
ccontext = sepgsql_compute_create(scontext,
rcontext,
SEPG_CLASS_DB_COLUMN);
/*
* check db_column:{create} permission
*/
@ -348,10 +353,10 @@ out:
void
sepgsql_relation_drop(Oid relOid)
{
ObjectAddress object;
char *audit_name;
uint16_t tclass = 0;
char relkind;
relkind = get_rel_relkind(relOid);
if (relkind == RELKIND_RELATION)
@ -398,13 +403,13 @@ sepgsql_relation_drop(Oid relOid)
*/
if (relkind == RELKIND_RELATION)
{
Form_pg_attribute attForm;
CatCList *attrList;
HeapTuple atttup;
int i;
attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
for (i = 0; i < attrList->n_members; i++)
{
atttup = &attrList->members[i]->tuple;
attForm = (Form_pg_attribute) GETSTRUCT(atttup);
@ -436,7 +441,7 @@ sepgsql_relation_drop(Oid relOid)
void
sepgsql_relation_relabel(Oid relOid, const char *seclabel)
{
ObjectAddress object;
char *audit_name;
char relkind;
uint16_t tclass = 0;
@ -468,6 +473,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
SEPG_DB_TABLE__RELABELFROM,
audit_name,
true);
/*
* check db_xxx:{relabelto} permission
*/


@ -35,22 +35,22 @@ void
sepgsql_schema_post_create(Oid namespaceId)
{
Relation rel;
ScanKeyData skey;
SysScanDesc sscan;
HeapTuple tuple;
char *tcontext;
char *ncontext;
char audit_name[NAMEDATALEN + 20];
ObjectAddress object;
Form_pg_namespace nspForm;
/*
* Compute a default security label when we create a new schema object
* under the working database.
*
* XXX - an upcoming version of libselinux will support taking the
* object name to handle special treatment of default security labels,
* such as a special label on the "pg_temp" schema.
*/
rel = heap_open(NamespaceRelationId, AccessShareLock);
@ -71,6 +71,7 @@ sepgsql_schema_post_create(Oid namespaceId)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
SEPG_CLASS_DB_SCHEMA);
/*
* check db_schema:{create}
*/
@ -104,8 +105,8 @@ sepgsql_schema_post_create(Oid namespaceId)
void
sepgsql_schema_drop(Oid namespaceId)
{
ObjectAddress object;
char *audit_name;
/*
* check db_schema:{drop} permission
@ -116,7 +117,7 @@ sepgsql_schema_drop(Oid namespaceId)
audit_name = getObjectDescription(&object);
sepgsql_avc_check_perms(&object,
SEPG_CLASS_DB_SCHEMA,
SEPG_DB_SCHEMA__DROP,
audit_name,
true);
@ -132,8 +133,8 @@ sepgsql_schema_drop(Oid namespaceId)
void
sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
{
ObjectAddress object;
char *audit_name;
object.classId = NamespaceRelationId;
object.objectId = namespaceId;
@ -149,6 +150,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
SEPG_DB_SCHEMA__RELABELFROM,
audit_name,
true);
/*
* check db_schema:{relabelto} permission
*/


@ -248,20 +248,21 @@ extern bool sepgsql_check_perms(const char *scontext,
uint32 required,
const char *audit_name,
bool abort);
/*
* uavc.c
*/
#define SEPGSQL_AVC_NOAUDIT ((void *)(-1))
extern bool sepgsql_avc_check_perms_label(const char *tcontext,
uint16 tclass,
uint32 required,
const char *audit_name,
bool abort);
extern bool sepgsql_avc_check_perms(const ObjectAddress *tobject,
uint16 tclass,
uint32 required,
const char *audit_name,
bool abort);
extern char *sepgsql_avc_trusted_proc(Oid functionId);
extern void sepgsql_avc_init(void);
@ -269,7 +270,7 @@ extern void sepgsql_avc_init(void);
* label.c
*/
extern char *sepgsql_get_client_label(void);
extern void sepgsql_init_client_label(void);
extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId);
extern void sepgsql_object_relabel(const ObjectAddress *object,
@ -290,7 +291,7 @@ extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort);
* database.c
*/
extern void sepgsql_database_post_create(Oid databaseId,
const char *dtemplate);
extern void sepgsql_database_drop(Oid databaseId);
extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel);


@ -30,22 +30,22 @@
*/
typedef struct
{
uint32 hash; /* hash value of this cache entry */
char *scontext; /* security context of the subject */
char *tcontext; /* security context of the target */
uint16 tclass; /* object class of the target */
uint32 allowed; /* permissions to be allowed */
uint32 auditallow; /* permissions to be audited on allowed */
uint32 auditdeny; /* permissions to be audited on denied */
bool permissive; /* true, if permissive rule */
bool hot_cache; /* true, if recently referenced */
bool tcontext_is_valid; /* true, if tcontext is valid */
char *ncontext; /* temporary scontext on execution of trusted
* procedure, or NULL elsewhere */
} avc_cache;
/*
* Declaration of static variables
@ -54,12 +54,12 @@ typedef struct
#define AVC_NUM_RECLAIM 16
#define AVC_DEF_THRESHOLD 384
static MemoryContext avc_mem_cxt;
static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */
static int avc_num_caches; /* number of caches currently used */
static int avc_lru_hint; /* index of the buckets to be reclaimed next */
static int avc_threshold; /* threshold to launch cache-reclaiming */
static char *avc_unlabeled; /* system 'unlabeled' label */
/*
* Hash function
@ -67,8 +67,8 @@ static char *avc_unlabeled; /* system 'unlabeled' label */
static uint32
sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass)
{
return hash_any((const unsigned char *) scontext, strlen(scontext))
^ hash_any((const unsigned char *) tcontext, strlen(tcontext))
^ tclass;
}
@ -88,7 +88,7 @@ sepgsql_avc_reset(void)
/*
* Reclaim caches recently unreferenced
*/
static void
sepgsql_avc_reclaim(void)
{
@ -142,15 +142,15 @@ sepgsql_avc_reclaim(void)
* Access control decisions must be atomic, but multiple system calls may
* be required to make a decision; thus, when referencing the access vector
* cache, we must loop until we complete without an intervening cache flush
* event. In practice, looping even once should be very rare. Callers should
* do something like this:
*
* sepgsql_avc_check_valid();
* do {
* :
* <reference to uavc>
* :
* } while (!sepgsql_avc_check_valid())
*
* -------------------------------------------------------------------------
*/
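Spelled out, a caller following that protocol could look like this (an illustrative sketch only; check_perms_sketch is a hypothetical name, and the audit and permissive-mode handling the real callers perform is omitted):

static bool
check_perms_sketch(const char *scontext, const char *tcontext,
				   uint16 tclass, uint32 required)
{
	bool		allowed;

	sepgsql_avc_check_valid();
	do
	{
		avc_cache  *cache = sepgsql_avc_lookup(scontext, tcontext, tclass);

		allowed = (cache->allowed & required) == required;
	} while (!sepgsql_avc_check_valid());	/* retry if the cache was flushed */

	return allowed;
}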
@ -169,7 +169,7 @@ sepgsql_avc_check_valid(void)
/*
* sepgsql_avc_unlabeled
*
* Returns an alternative label to be applied when no label or an invalid
* label would otherwise be assigned.
*/
static char *
@ -177,12 +177,12 @@ sepgsql_avc_unlabeled(void)
{
if (!avc_unlabeled)
{
security_context_t unlabeled;
if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("SELinux: failed to get initial security label: %m")));
PG_TRY();
{
avc_unlabeled = MemoryContextStrdup(avc_mem_cxt, unlabeled);
@ -200,7 +200,7 @@ sepgsql_avc_unlabeled(void)
}
/*
* sepgsql_avc_compute
*
* A fallback path, when cache mishit. It asks SELinux its access control
* decision for the supplied pair of security context and object class.
@ -208,24 +208,24 @@ sepgsql_avc_unlabeled(void)
static avc_cache *
sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
{
char *ucontext = NULL;
char *ncontext = NULL;
MemoryContext oldctx;
avc_cache *cache;
uint32 hash;
int index;
struct av_decision avd;
hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS;
/*
* Validation check of the supplied security context. Because it always
* invokes a system call, frequent checks should be avoided. Unless the
* security policy is reloaded, the validation status is kept, so we
* also cache whether the supplied security context was valid, or not.
*/
if (security_check_context_raw((security_context_t) tcontext) != 0)
ucontext = sepgsql_avc_unlabeled();
/*
@ -237,15 +237,14 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
sepgsql_compute_avd(scontext, ucontext, tclass, &avd);
/*
* It also caches the security label to be switched when a client labeled
* as 'scontext' executes a procedure labeled as 'tcontext', not only the
* access control decision on the procedure. The label to be switched is
* computed uniquely from the pair of 'scontext' and 'tcontext', so it is
* reasonable to cache the new label on the avc as well, which helps
* reduce unnecessary system calls. It is referenced by
* sepgsql_needs_fmgr_hook to check whether the supplied function is a
* trusted procedure, or not.
*/
if (tclass == SEPG_CLASS_DB_PROCEDURE)
{
@ -269,7 +268,7 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
cache = palloc0(sizeof(avc_cache));
cache->hash = hash;
cache->scontext = pstrdup(scontext);
cache->tcontext = pstrdup(tcontext);
cache->tclass = tclass;
@ -314,7 +313,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS;
foreach(cell, avc_slots[index])
{
cache = lfirst(cell);
@ -348,14 +347,15 @@ sepgsql_avc_check_perms_label(const char *tcontext,
uint16 tclass, uint32 required,
const char *audit_name, bool abort)
{
char *scontext = sepgsql_get_client_label();
avc_cache *cache;
uint32 denied;
uint32 audited;
bool result;
sepgsql_avc_check_valid();
do
{
result = true;
/*
@ -377,16 +377,16 @@ sepgsql_avc_check_perms_label(const char *tcontext,
audited = (denied ? (denied & ~0) : (required & ~0));
else
audited = denied ? (denied & cache->auditdeny)
: (required & cache->auditallow);
if (denied)
{
/*
* In permissive mode or permissive domain, violated permissions
* shall be audited to the log files at once, and then implicitly
* allowed to avoid a flood of access denied logs, because the
* purpose of permissive mode/domain is to collect a violation log
* that will make it possible to fix up the security policy.
*/
if (!sepgsql_getenforce() || cache->permissive)
cache->allowed |= required;
@ -397,10 +397,10 @@ sepgsql_avc_check_perms_label(const char *tcontext,
/*
* When there are auditable actions here, sepgsql_audit_log shall be
* called with the text representation of the security labels for both
* subject and object. It records this access violation, so the DBA
* will be able to find out unexpected security problems later.
*/
if (audited != 0 &&
audit_name != SEPGSQL_AVC_NOAUDIT &&
@ -428,8 +428,8 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
uint16 tclass, uint32 required,
const char *audit_name, bool abort)
{
char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
bool rc;
rc = sepgsql_avc_check_perms_label(tcontext,
tclass, required,
@ -450,10 +450,10 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
char *
sepgsql_avc_trusted_proc(Oid functionId)
{
char *scontext = sepgsql_get_client_label();
char *tcontext;
ObjectAddress tobject;
avc_cache *cache;
tobject.classId = ProcedureRelationId;
tobject.objectId = functionId;
@ -461,7 +461,8 @@ sepgsql_avc_trusted_proc(Oid functionId)
tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG);
sepgsql_avc_check_valid();
do
{
if (tcontext)
cache = sepgsql_avc_lookup(scontext, tcontext,
SEPG_CLASS_DB_PROCEDURE);
@ -492,7 +493,7 @@ sepgsql_avc_exit(int code, Datum arg)
void
sepgsql_avc_init(void)
{
int rc;
/*
* All the avc stuff shall be allocated on avc_mem_cxt
@ -508,12 +509,11 @@ sepgsql_avc_init(void)
avc_threshold = AVC_DEF_THRESHOLD;
/*
* SELinux allows applications to mmap(2) its kernel status page in
* read-only mode, to inform userspace of status updates (such as policy
* reloading) without system-call invocations. This feature is only
* supported in Linux 2.6.38 or later; however, libselinux provides a
* fallback mode that learns the status using netlink sockets.
*/
rc = selinux_status_open(1);
if (rc < 0)
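For reference, the status-page API as its man pages describe it (assumed usage sketched from selinux_status_open(3), not this module's code):

#include <selinux/avc.h>

static void
poll_policy_reload(void)
{
	if (selinux_status_open(1) < 0)		/* 1 = allow the netlink fallback */
		return;
	if (selinux_status_updated() > 0)
	{
		/* policy was reloaded; invalidate userspace caches here */
	}
	selinux_status_close();
}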


@ -536,8 +536,7 @@ check_foreign_key(PG_FUNCTION_ARGS)
/*
* Remember that SPI_prepare places plan in current memory context
* - so, we have to save plan in Top memory context for later use.
*/
if (SPI_keepplan(pplan))
/* internal error */
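The pattern the comment describes, in a minimal sketch (hypothetical wrapper; SPI_keepplan() reparents the plan out of the transient SPI context so it survives SPI_finish()):

#include "executor/spi.h"

static SPIPlanPtr saved_plan = NULL;	/* hypothetical long-lived cache slot */

static void
prepare_and_keep(const char *sql, int nargs, Oid *argtypes)
{
	SPIPlanPtr	pplan = SPI_prepare(sql, nargs, argtypes);

	if (pplan == NULL)
		elog(ERROR, "SPI_prepare failed: %s",
			 SPI_result_code_string(SPI_result));
	if (SPI_keepplan(pplan))	/* returns nonzero on failure */
		elog(ERROR, "SPI_keepplan failed");
	saved_plan = pplan;
}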


@ -69,7 +69,7 @@ vacuumlo(const char *database, const struct _param * param)
int i;
static char *password = NULL;
bool new_pass;
bool success = true;
/* Note: password can be carried over from a previous call */
if (param->pg_prompt == TRI_YES && password == NULL)
@ -261,8 +261,8 @@ vacuumlo(const char *database, const struct _param * param)
* We don't want to run each delete as an individual transaction, because
* the commit overhead would be high. However, since 9.0 the backend will
* acquire a lock per deleted LO, so deleting too many LOs per transaction
* risks running out of room in the shared-memory lock table. Accordingly,
* we delete up to transaction_limit LOs per transaction.
*/
res = PQexec(conn, "begin");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
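The batching policy just described, reduced to a sketch (hypothetical helper with error handling trimmed; limit == 0 keeps everything in one transaction, matching the disabled case):

#include "libpq-fe.h"

static void
unlink_los_in_batches(PGconn *conn, const Oid *los, int nlos, int limit)
{
	int			in_txn = 0;
	int			i;

	PQclear(PQexec(conn, "begin"));
	for (i = 0; i < nlos; i++)
	{
		lo_unlink(conn, los[i]);	/* each deletion holds a lock until commit */
		if (limit > 0 && ++in_txn >= limit)
		{
			/* commit to release the accumulated locks, then start over */
			PQclear(PQexec(conn, "commit"));
			PQclear(PQexec(conn, "begin"));
			in_txn = 0;
		}
	}
	PQclear(PQexec(conn, "commit"));
}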
@ -459,8 +459,8 @@ main(int argc, char **argv)
if (param.transaction_limit < 0)
{
fprintf(stderr,
"%s: transaction limit must not be negative (0 disables)\n",
progname);
"%s: transaction limit must not be negative (0 disables)\n",
progname);
exit(1);
}
break;


@ -702,126 +702,126 @@ xpath_table(PG_FUNCTION_ARGS)
PG_TRY();
{
	/* For each row i.e. document returned from SPI */
	for (i = 0; i < proc; i++)
	{
		char	   *pkey;
		char	   *xmldoc;
		xmlXPathContextPtr ctxt;
		xmlXPathObjectPtr res;
		xmlChar    *resstr;
		xmlXPathCompExprPtr comppath;

		/* Extract the row data as C Strings */
		spi_tuple = tuptable->vals[i];
		pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
		xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);

		/*
		 * Clear the values array, so that not-well-formed documents
		 * return NULL in all columns. Note that this also means that
		 * spare columns will be NULL.
		 */
		for (j = 0; j < ret_tupdesc->natts; j++)
			values[j] = NULL;

		/* Insert primary key */
		values[0] = pkey;

		/* Parse the document */
		if (xmldoc)
			doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
		else	/* treat NULL as not well-formed */
			doctree = NULL;

		if (doctree == NULL)
		{
			/* not well-formed, so output all-NULL tuple */
			ret_tuple = BuildTupleFromCStrings(attinmeta, values);
			tuplestore_puttuple(tupstore, ret_tuple);
			heap_freetuple(ret_tuple);
		}
		else
		{
			/* New loop here - we have to deal with nodeset results */
			rownr = 0;

			do
			{
				/* Now evaluate the set of xpaths. */
				had_values = false;
				for (j = 0; j < numpaths; j++)
				{
					ctxt = xmlXPathNewContext(doctree);
					ctxt->node = xmlDocGetRootElement(doctree);

					/* compile the path */
					comppath = xmlXPathCompile(xpaths[j]);
					if (comppath == NULL)
						xml_ereport(xmlerrcxt, ERROR,
									ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
									"XPath Syntax Error");

					/* Now evaluate the path expression. */
					res = xmlXPathCompiledEval(comppath, ctxt);
					xmlXPathFreeCompExpr(comppath);

					if (res != NULL)
					{
						switch (res->type)
						{
							case XPATH_NODESET:
								/* We see if this nodeset has enough nodes */
								if (res->nodesetval != NULL &&
									rownr < res->nodesetval->nodeNr)
								{
									resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
									had_values = true;
								}
								else
									resstr = NULL;
								break;

							case XPATH_STRING:
								resstr = xmlStrdup(res->stringval);
								break;

							default:
								elog(NOTICE, "unsupported XQuery result: %d", res->type);
								resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
						}

						/*
						 * Insert this into the appropriate column in the
						 * result tuple.
						 */
						values[j + 1] = (char *) resstr;
					}
					xmlXPathFreeContext(ctxt);
				}

				/* Now add the tuple to the output, if there is one. */
				if (had_values)
				{
					ret_tuple = BuildTupleFromCStrings(attinmeta, values);
					tuplestore_puttuple(tupstore, ret_tuple);
					heap_freetuple(ret_tuple);
				}

				rownr++;
			} while (had_values);
		}

		if (doctree != NULL)
			xmlFreeDoc(doctree);
		doctree = NULL;

		if (pkey)
			pfree(pkey);
		if (xmldoc)
			pfree(xmldoc);
	}
}
PG_CATCH();
{


@ -85,40 +85,40 @@ xslt_process(PG_FUNCTION_ARGS)
{
	/* Check to see if document is a file or a literal */
	if (VARDATA(doct)[0] == '<')
		doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
	else
		doctree = xmlParseFile(text_to_cstring(doct));

	if (doctree == NULL)
		xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
					"error parsing XML document");

	/* Same for stylesheet */
	if (VARDATA(ssheet)[0] == '<')
	{
		ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
							   VARSIZE(ssheet) - VARHDRSZ);
		if (ssdoc == NULL)
			xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
						"error parsing stylesheet as XML document");

		stylesheet = xsltParseStylesheetDoc(ssdoc);
	}
	else
		stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));

	if (stylesheet == NULL)
		xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
					"failed to parse stylesheet");

	restree = xsltApplyStylesheet(stylesheet, doctree, params);

	if (restree == NULL)
		xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
					"failed to apply stylesheet");

	resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
}
PG_CATCH();
{


@ -27,7 +27,7 @@
/* non-export function prototypes */
static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate);
static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
GISTSTATE *giststate,
IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
@ -781,8 +781,8 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
{
/*
* Page was split while we looked elsewhere. We didn't see the
* downlink to the right page when we scanned the parent, so add
* it to the queue now.
*
* Put the right page ahead of the queue, so that we visit it
* next. That's important, because if this is the lowest internal
@ -829,7 +829,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
RelationGetRelationName(r), child);
return NULL; /* keep compiler quiet */
}
/*
@ -1046,7 +1046,7 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
*/
static bool
gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
{
return gistinserttuples(state, stack, giststate, &tuple, 1, oldoffnum,
InvalidBuffer, InvalidBuffer, false, false);
@ -1308,7 +1308,7 @@ initGISTstate(Relation index)
giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
giststate->scanCxt = scanCxt;
giststate->tempCxt = scanCxt; /* caller must change this if needed */
giststate->tupdesc = index->rd_att;
for (i = 0; i < index->rd_att->natts; i++)


@ -48,7 +48,7 @@ typedef enum
* before switching to the buffering build
* mode */
GIST_BUFFERING_ACTIVE /* in buffering build mode */
} GistBufferingMode;
/* Working state for gistbuild and its callback */
typedef struct
@ -263,7 +263,7 @@ gistValidateBufferingOption(char *value)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for \"buffering\" option"),
errdetail("Valid values are \"on\", \"off\", and \"auto\".")));
errdetail("Valid values are \"on\", \"off\", and \"auto\".")));
}
}
@ -567,7 +567,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
BlockNumber childblkno;
Buffer buffer;
bool result = false;
BlockNumber blkno;
int level;
OffsetNumber downlinkoffnum = InvalidOffsetNumber;
BlockNumber parentblkno = InvalidBlockNumber;
@ -623,7 +623,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
{
gistbufferinginserttuples(buildstate, buffer, level,
&newtup, 1, childoffnum,
InvalidBlockNumber, InvalidOffsetNumber);
/* gistbufferinginserttuples() released the buffer */
}
else
@ -716,26 +716,26 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
/*
* All the downlinks on the old root page are now on one of the child
* pages. Visit all the new child pages to memorize the parents of the
* grandchildren.
*/
if (gfbb->rootlevel > 1)
{
maxoff = PageGetMaxOffsetNumber(page);
for (off = FirstOffsetNumber; off <= maxoff; off++)
{
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
Buffer childbuf = ReadBuffer(buildstate->indexrel, childblkno);
LockBuffer(childbuf, GIST_SHARE);
gistMemorizeAllDownlinks(buildstate, childbuf);
UnlockReleaseBuffer(childbuf);
/*
* Also remember that the parent of the new child page is the
* root block.
*/
gistMemorizeParent(buildstate, childblkno, GIST_ROOT_BLKNO);
}
@ -789,8 +789,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
* Remember the parent of each new child page in our parent map.
* This assumes that the downlinks fit on the parent page. If the
* parent page is split, too, when we recurse up to insert the
* downlinks, the recursive gistbufferinginserttuples() call will
* update the map again.
*/
if (level > 0)
gistMemorizeParent(buildstate,
@ -879,8 +879,9 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
if (parent == *parentblkno && *parentblkno != InvalidBlockNumber &&
*downlinkoffnum != InvalidOffsetNumber && *downlinkoffnum <= maxoff)
{
ItemId iid = PageGetItemId(page, *downlinkoffnum);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{
/* Still there */
@ -889,16 +890,17 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
}
/*
* Downlink was not at the offset where it used to be. Scan the page to
* find it. During normal gist insertions, it might've moved to another
* page, to the right, but during a buffering build, we keep track of the
* parent of each page in the lookup table so we should always know what
* page it's on.
*/
for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
{
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{
/* yes, found it! */
@ -908,7 +910,7 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
}
elog(ERROR, "failed to re-find parent for block %u", childblkno);
return InvalidBuffer; /* keep compiler quiet */
return InvalidBuffer; /* keep compiler quiet */
}
/*
@ -1129,7 +1131,7 @@ gistGetMaxLevel(Relation index)
typedef struct
{
BlockNumber childblkno; /* hash key */
BlockNumber childblkno; /* hash key */
BlockNumber parentblkno;
} ParentMapEntry;
@ -1156,9 +1158,9 @@ gistMemorizeParent(GISTBuildState *buildstate, BlockNumber child, BlockNumber pa
bool found;
entry = (ParentMapEntry *) hash_search(buildstate->parentMap,
(const void *) &child,
HASH_ENTER,
&found);
(const void *) &child,
HASH_ENTER,
&found);
entry->parentblkno = parent;
}
@ -1171,16 +1173,17 @@ gistMemorizeAllDownlinks(GISTBuildState *buildstate, Buffer parentbuf)
OffsetNumber maxoff;
OffsetNumber off;
BlockNumber parentblkno = BufferGetBlockNumber(parentbuf);
Page page = BufferGetPage(parentbuf);
Page page = BufferGetPage(parentbuf);
Assert(!GistPageIsLeaf(page));
maxoff = PageGetMaxOffsetNumber(page);
for (off = FirstOffsetNumber; off <= maxoff; off++)
{
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
ItemId iid = PageGetItemId(page, off);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
gistMemorizeParent(buildstate, childblkno, parentblkno);
}
}
@ -1193,9 +1196,9 @@ gistGetParent(GISTBuildState *buildstate, BlockNumber child)
/* Find node buffer in hash table */
entry = (ParentMapEntry *) hash_search(buildstate->parentMap,
(const void *) &child,
HASH_FIND,
&found);
(const void *) &child,
HASH_FIND,
&found);
if (!found)
elog(ERROR, "could not find parent of block %d in lookup table", child);

View File

@ -528,7 +528,7 @@ typedef struct
bool isnull[INDEX_MAX_KEYS];
GISTPageSplitInfo *splitinfo;
GISTNodeBuffer *nodeBuffer;
} RelocationBufferInfo;
} RelocationBufferInfo;
/*
* At page split, distribute tuples from the buffer of the split page to

View File

@ -244,7 +244,7 @@ typedef struct
int index;
/* Delta between penalties of entry insertion into different groups */
double delta;
} CommonEntry;
} CommonEntry;
/*
* Context for g_box_consider_split. Contains information about currently
@ -267,7 +267,7 @@ typedef struct
int dim; /* axis of this split */
double range; /* width of general MBR projection to the
* selected axis */
} ConsiderSplitContext;
} ConsiderSplitContext;
/*
* Interval represents projection of box to axis.
@ -276,7 +276,7 @@ typedef struct
{
double lower,
upper;
} SplitInterval;
} SplitInterval;
/*
* Interval comparison function by lower bound of the interval;
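 */
static int
interval_cmp_lower(const void *i1, const void *i2)
{
	/* a minimal sketch of the lower-bound comparator described above */
	double		lower1 = ((const SplitInterval *) i1)->lower;
	double		lower2 = ((const SplitInterval *) i2)->lower;

	if (lower1 < lower2)
		return -1;
	else if (lower1 > lower2)
		return 1;
	else
		return 0;
}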

View File

@ -124,7 +124,7 @@ gistbeginscan(PG_FUNCTION_ARGS)
so->giststate = giststate;
giststate->tempCxt = createTempGistContext();
so->queue = NULL;
so->queueCxt = giststate->scanCxt; /* see gistrescan */
so->queueCxt = giststate->scanCxt; /* see gistrescan */
/* workspaces with size dependent on numberOfOrderBys: */
so->tmpTreeItem = palloc(GSTIHDRSZ + sizeof(double) * scan->numberOfOrderBys);

View File

@ -581,8 +581,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *gist
if (v->spl_equiv == NULL)
{
/*
* simple case: left and right keys for attno column are
* equal
* simple case: left and right keys for attno column are equal
*/
gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
}

View File

@ -391,7 +391,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
uint32 ovflbitno;
int32 bitmappage,
bitmapbit;
Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
/* Get information from the doomed page */
_hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE);
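PG_USED_FOR_ASSERTS_ONLY silences unused-variable warnings when assertions are compiled out; its definition in c.h is roughly this (a sketch, not a verbatim quote):

#ifdef USE_ASSERT_CHECKING
#define PG_USED_FOR_ASSERTS_ONLY
#else
#define PG_USED_FOR_ASSERTS_ONLY __attribute__((unused))
#endif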

View File

@ -223,9 +223,9 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
}
/*
* Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters
* many pages' worth of consecutive dead tuples.
* Be sure to check for interrupts at least once per page. Checks at
* higher code levels won't be able to stop a seqscan that encounters many
* pages' worth of consecutive dead tuples.
*/
CHECK_FOR_INTERRUPTS();
@ -997,8 +997,8 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
*
* Same as relation_openrv, but with an additional missing_ok argument
* allowing a NULL return rather than an error if the relation is not
* found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* found. (Note that some other causes, such as permissions problems,
* will still result in an ereport.)
* ----------------
*/
Relation
@ -1105,7 +1105,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
* by a RangeVar node
*
* As above, but optionally return NULL instead of failing for
* relation-not-found.
* relation-not-found.
* ----------------
*/
Relation
@ -1588,10 +1588,10 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
/*
* When first_call is true (and thus, skip is initially false) we'll
* return the first tuple we find. But on later passes, heapTuple
* return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
* Returning it again would be incorrect (and would loop forever),
* so we skip it and return the next match we find.
* Returning it again would be incorrect (and would loop forever), so
* we skip it and return the next match we find.
*/
if (!skip)
{
@ -1651,7 +1651,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
{
bool result;
Buffer buffer;
HeapTupleData heapTuple;
HeapTupleData heapTuple;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@ -1885,14 +1885,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
/*
* We're about to do the actual insert -- but check for conflict first,
* to avoid possibly having to roll back work we've just done.
* We're about to do the actual insert -- but check for conflict first, to
* avoid possibly having to roll back work we've just done.
*
* For a heap insert, we only need to check for table-level SSI locks.
* Our new tuple can't possibly conflict with existing tuple locks, and
* heap page locks are only consolidated versions of tuple locks; they do
* not lock "gaps" as index page locks do. So we don't need to identify
* a buffer before making the call.
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
* lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
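By contrast, paths that modify an existing tuple do identify a buffer; a sketch of the corresponding call as it appears in heap_update:

/* the old tuple's page can carry predicate locks, so pass the buffer */
CheckForSerializableConflictIn(relation, &oldtup, buffer);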
@ -2123,11 +2123,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* We're about to do the actual inserts -- but check for conflict first,
* to avoid possibly having to roll back work we've just done.
*
* For a heap insert, we only need to check for table-level SSI locks.
* Our new tuple can't possibly conflict with existing tuple locks, and
* heap page locks are only consolidated versions of tuple locks; they do
* not lock "gaps" as index page locks do. So we don't need to identify
* a buffer before making the call.
* For a heap insert, we only need to check for table-level SSI locks. Our
* new tuple can't possibly conflict with existing tuple locks, and heap
* page locks are only consolidated versions of tuple locks; they do not
* lock "gaps" as index page locks do. So we don't need to identify a
* buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@ -2137,12 +2137,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
bool all_visible_cleared = false;
int nthispage;
int nthispage;
/*
* Find buffer where at least the next tuple will fit. If the page
* is all-visible, this will also pin the requisite visibility map
* page.
* Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
InvalidBuffer, options, bistate,
@ -2358,7 +2357,7 @@ heap_delete(Relation relation, ItemPointer tid,
ItemId lp;
HeapTupleData tp;
Page page;
BlockNumber block;
BlockNumber block;
Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
bool have_tuple_lock = false;
@ -2372,10 +2371,10 @@ heap_delete(Relation relation, ItemPointer tid,
page = BufferGetPage(buffer);
/*
* Before locking the buffer, pin the visibility map page if it appears
* to be necessary. Since we haven't got the lock yet, someone else might
* be in the middle of changing this, so we'll need to recheck after
* we have the lock.
* Before locking the buffer, pin the visibility map page if it appears to
* be necessary. Since we haven't got the lock yet, someone else might be
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
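The recheck described above happens once the buffer lock is held; a condensed sketch of that dance:

LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
{
	/* bit was set while we waited: unlock, pin the map page, relock */
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	visibilitymap_pin(relation, block, &vmbuffer);
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
}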
@ -2717,7 +2716,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
HeapTupleData oldtup;
HeapTuple heaptup;
Page page;
BlockNumber block;
BlockNumber block;
Buffer buffer,
newbuf,
vmbuffer = InvalidBuffer,
@ -2753,10 +2752,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
page = BufferGetPage(buffer);
/*
* Before locking the buffer, pin the visibility map page if it appears
* to be necessary. Since we haven't got the lock yet, someone else might
* be in the middle of changing this, so we'll need to recheck after
* we have the lock.
* Before locking the buffer, pin the visibility map page if it appears to
* be necessary. Since we haven't got the lock yet, someone else might be
* in the middle of changing this, so we'll need to recheck after we have
* the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
@ -2900,11 +2899,11 @@ l2:
/*
* If we didn't pin the visibility map page and the page has become all
* visible while we were busy locking the buffer, or during some subsequent
* window during which we had it unlocked, we'll have to unlock and
* re-lock, to avoid holding the buffer lock across an I/O. That's a bit
* unfortunate, especially since we'll now have to recheck whether the
* tuple has been locked or updated under us, but hopefully it won't
* visible while we were busy locking the buffer, or during some
* subsequent window during which we had it unlocked, we'll have to unlock
* and re-lock, to avoid holding the buffer lock across an I/O. That's a
* bit unfortunate, especially since we'll now have to recheck whether
* the tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@ -3196,11 +3195,11 @@ l2:
/*
* Mark old tuple for invalidation from system caches at next command
* boundary, and mark the new tuple for invalidation in case we abort.
* We have to do this before releasing the buffer because oldtup is in
* the buffer. (heaptup is all in local memory, but it's necessary to
* process both tuple versions in one call to inval.c so we can avoid
* redundant sinval messages.)
* boundary, and mark the new tuple for invalidation in case we abort. We
* have to do this before releasing the buffer because oldtup is in the
* buffer. (heaptup is all in local memory, but it's necessary to process
* both tuple versions in one call to inval.c so we can avoid redundant
* sinval messages.)
*/
CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
@ -4069,7 +4068,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
*/
bool
heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
Buffer buf)
Buffer buf)
{
TransactionId xid;
@ -4368,9 +4367,9 @@ log_heap_freeze(Relation reln, Buffer buffer,
}
/*
* Perform XLogInsert for a heap-visible operation. 'block' is the block
* Perform XLogInsert for a heap-visible operation. 'block' is the block
* being marked all-visible, and vm_buffer is the buffer containing the
* corresponding visibility map block. Both should have already been modified
* corresponding visibility map block. Both should have already been modified
* and dirtied.
*/
XLogRecPtr
@ -4705,7 +4704,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
Page page;
/*
* Read the heap page, if it still exists. If the heap file has been
* Read the heap page, if it still exists. If the heap file has been
* dropped or truncated later in recovery, this might fail. In that case,
* there's no point in doing anything further, since the visibility map
* will have to be cleared out at the same time.
@ -4731,17 +4730,16 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
* We don't bump the LSN of the heap page when setting the visibility
* map bit, because that would generate an unworkable volume of
* full-page writes. This exposes us to torn page hazards, but since
* we're not inspecting the existing page contents in any way, we
* don't care.
* We don't bump the LSN of the heap page when setting the visibility map
* bit, because that would generate an unworkable volume of full-page
* writes. This exposes us to torn page hazards, but since we're not
* inspecting the existing page contents in any way, we don't care.
*
* However, all operations that clear the visibility map bit *do* bump
* the LSN, and those operations will only be replayed if the XLOG LSN
* follows the page LSN. Thus, if the page LSN has advanced past our
* XLOG record's LSN, we mustn't mark the page all-visible, because
* the subsequent update won't be replayed to clear the flag.
* However, all operations that clear the visibility map bit *do* bump the
* LSN, and those operations will only be replayed if the XLOG LSN follows
* the page LSN. Thus, if the page LSN has advanced past our XLOG
* record's LSN, we mustn't mark the page all-visible, because the
* subsequent update won't be replayed to clear the flag.
*/
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@ -4772,10 +4770,10 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* Don't set the bit if replay has already passed this point.
*
* It might be safe to do this unconditionally; if replay has past
* this point, we'll replay at least as far this time as we did before,
* and if this bit needs to be cleared, the record responsible for
* doing so should be again replayed, and clear it. For right now,
* out of an abundance of conservatism, we use the same test here
* this point, we'll replay at least as far this time as we did
* before, and if this bit needs to be cleared, the record responsible
* for doing so should be again replayed, and clear it. For right
* now, out of an abundance of conservatism, we use the same test here
* we did for the heap page; if this results in a dropped bit, no real
* harm is done; and the next VACUUM will fix it.
*/
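Concretely, the bit is set only when the record's LSN is ahead of the map page's LSN; a sketch, assuming the 9.2-era visibilitymap_set signature that takes a pinned buffer and cutoff_xid:

if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer))))
	visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
					  xlrec->cutoff_xid);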
@ -5183,7 +5181,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
if (xlrec->all_visible_cleared)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
@ -5267,7 +5265,7 @@ newt:;
if (xlrec->new_all_visible_cleared)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
@ -5690,7 +5688,7 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
else
appendStringInfo(buf, "multi-insert: ");
appendStringInfo(buf, "rel %u/%u/%u; blk %u; %d tuples",
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->blkno, xlrec->ntuples);
}
else

View File

@ -109,8 +109,8 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
BlockNumber block1, BlockNumber block2,
Buffer *vmbuffer1, Buffer *vmbuffer2)
{
bool need_to_pin_buffer1;
bool need_to_pin_buffer2;
bool need_to_pin_buffer1;
bool need_to_pin_buffer2;
Assert(BufferIsValid(buffer1));
Assert(buffer2 == InvalidBuffer || buffer1 <= buffer2);
@ -145,7 +145,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
/*
* If there are two buffers involved and we pinned just one of them,
* it's possible that the second one became all-visible while we were
* busy pinning the first one. If it looks like that's a possible
* busy pinning the first one. If it looks like that's a possible
* scenario, we'll need to make a second pass through this loop.
*/
if (buffer2 == InvalidBuffer || buffer1 == buffer2
@ -302,11 +302,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
* block if one was given, taking suitable care with lock ordering and
* the possibility they are the same block.
*
* If the page-level all-visible flag is set, caller will need to clear
* both that and the corresponding visibility map bit. However, by the
* time we return, we'll have x-locked the buffer, and we don't want to
* do any I/O while in that state. So we check the bit here before
* taking the lock, and pin the page if it appears necessary.
* If the page-level all-visible flag is set, caller will need to
* clear both that and the corresponding visibility map bit. However,
* by the time we return, we'll have x-locked the buffer, and we don't
* want to do any I/O while in that state. So we check the bit here
* before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock.
*/
@ -347,23 +347,24 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We now have the target page (and the other buffer, if any) pinned
* and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now
* be out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to give
* up our locks, go get the pin(s) we failed to get earlier, and
* and locked. However, since our initial PageIsAllVisible checks
* were performed before acquiring the lock, the results might now be
* out of date, either for the selected victim buffer, or for the
* other buffer passed by the caller. In that case, we'll need to
* give up our locks, go get the pin(s) we failed to get earlier, and
* re-lock. That's pretty painful, but hopefully shouldn't happen
* often.
*
* Note that there's a small possibility that we didn't pin the
* page above but still have the correct page pinned anyway, either
* because we've already made a previous pass through this loop, or
* because caller passed us the right page anyway.
* Note that there's a small possibility that we didn't pin the page
* above but still have the correct page pinned anyway, either because
* we've already made a previous pass through this loop, or because
* caller passed us the right page anyway.
*
* Note also that it's possible that by the time we get the pin and
* retake the buffer locks, the visibility map bit will have been
* cleared by some other backend anyway. In that case, we'll have done
* a bit of extra work for no gain, but there's no real harm done.
* cleared by some other backend anyway. In that case, we'll have
* done a bit of extra work for no gain, but there's no real harm
* done.
*/
if (otherBuffer == InvalidBuffer || buffer <= otherBuffer)
GetVisibilityMapPins(relation, buffer, otherBuffer,

View File

@ -75,7 +75,7 @@ do { \
static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options);
struct varlena * oldexternal, int options);
static bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
static bool toastid_valueid_exists(Oid toastrelid, Oid valueid);
static struct varlena *toast_fetch_datum(struct varlena * attr);
@ -1233,7 +1233,7 @@ toast_compress_datum(Datum value)
*/
static Datum
toast_save_datum(Relation rel, Datum value,
struct varlena *oldexternal, int options)
struct varlena * oldexternal, int options)
{
Relation toastrel;
Relation toastidx;
@ -1353,7 +1353,7 @@ toast_save_datum(Relation rel, Datum value,
* those versions could easily reference the same toast value.
* When we copy the second or later version of such a row,
* reusing the OID will mean we select an OID that's already
* in the new toast table. Check for that, and if so, just
* in the new toast table. Check for that, and if so, just
* fall through without writing the data again.
*
* While annoying and ugly-looking, this is a good thing

View File

@ -16,7 +16,7 @@
* visibilitymap_pin_ok - check whether correct map page is already pinned
* visibilitymap_set - set a bit in a previously pinned page
* visibilitymap_test - test if a bit is set
* visibilitymap_count - count number of bits set in visibility map
* visibilitymap_count - count number of bits set in visibility map
* visibilitymap_truncate - truncate the visibility map
*
* NOTES
@ -27,7 +27,7 @@
* the sense that we make sure that whenever a bit is set, we know the
* condition is true, but if a bit is not set, it might or might not be true.
*
* Clearing a visibility map bit is not separately WAL-logged. The callers
* Clearing a visibility map bit is not separately WAL-logged. The callers
* must make sure that whenever a bit is cleared, the bit is cleared on WAL
* replay of the updating operation as well.
*
@ -36,9 +36,9 @@
* it may still be the case that every tuple on the page is visible to all
* transactions; we just don't know that for certain. The difficulty is that
* there are two bits which are typically set together: the PD_ALL_VISIBLE bit
* on the page itself, and the visibility map bit. If a crash occurs after the
* on the page itself, and the visibility map bit. If a crash occurs after the
* visibility map page makes it to disk and before the updated heap page makes
* it to disk, redo must set the bit on the heap page. Otherwise, the next
* it to disk, redo must set the bit on the heap page. Otherwise, the next
* insert, update, or delete on the heap page will fail to realize that the
* visibility map bit must be cleared, possibly causing index-only scans to
* return wrong answers.
@ -59,10 +59,10 @@
* the buffer lock over any I/O that may be required to read in the visibility
* map page. To avoid this, we examine the heap page before locking it;
* if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
* bit. Then, we lock the buffer. But this creates a race condition: there
* bit. Then, we lock the buffer. But this creates a race condition: there
* is a possibility that in the time it takes to lock the buffer, the
* PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
* buffer, pin the visibility map page, and relock the buffer. This shouldn't
* buffer, pin the visibility map page, and relock the buffer. This shouldn't
* happen often, because only VACUUM currently sets visibility map bits,
* and the race will only occur if VACUUM processes a given page at almost
* exactly the same time that someone tries to further modify it.
@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
* visibilitymap_set - set a bit on a previously pinned page
*
* recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
* or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* one provided; in normal running, we generate a new XLOG record and set the
* page LSN to that value. cutoff_xid is the largest xmin on the page being
* page LSN to that value. cutoff_xid is the largest xmin on the page being
* marked all-visible; it is needed for Hot Standby, and can be
* InvalidTransactionId if the page contains no tuples.
*
@ -295,10 +295,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
* releasing *buf after it's done testing and setting bits.
*
* NOTE: This function is typically called without a lock on the heap page,
* so somebody else could change the bit just after we look at it. In fact,
* so somebody else could change the bit just after we look at it. In fact,
* since we don't lock the visibility map page either, it's even possible that
* someone else could have changed the bit just before we look at it, but yet
* we might see the old value. It is the caller's responsibility to deal with
* we might see the old value. It is the caller's responsibility to deal with
* all concurrency issues!
*/
bool
@ -344,7 +344,7 @@ visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf)
}
/*
* visibilitymap_count - count number of bits set in visibility map
* visibilitymap_count - count number of bits set in visibility map
*
* Note: we ignore the possibility of race conditions when the table is being
* extended concurrently with the call. New pages added to the table aren't
@ -356,16 +356,16 @@ visibilitymap_count(Relation rel)
BlockNumber result = 0;
BlockNumber mapBlock;
for (mapBlock = 0; ; mapBlock++)
for (mapBlock = 0;; mapBlock++)
{
Buffer mapBuffer;
unsigned char *map;
int i;
/*
* Read till we fall off the end of the map. We assume that any
* extra bytes in the last page are zeroed, so we don't bother
* excluding them from the count.
* Read till we fall off the end of the map. We assume that any extra
* bytes in the last page are zeroed, so we don't bother excluding
* them from the count.
*/
mapBuffer = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(mapBuffer))
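			break;		/* fell off the end of the map */

		/*
		 * A sketch of the remainder of this loop: count bits per byte via a
		 * static lookup table (number_of_ones); no lock is taken, since an
		 * approximate answer is acceptable here.
		 */
		map = (unsigned char *) PageGetContents(BufferGetPage(mapBuffer));
		for (i = 0; i < MAPSIZE; i++)
			result += number_of_ones[map[i]];
		ReleaseBuffer(mapBuffer);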
@ -496,11 +496,11 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
Buffer buf;
/*
* We might not have opened the relation at the smgr level yet, or we might
* have been forced to close it by a sinval message. The code below won't
* necessarily notice relation extension immediately when extend = false,
* so we rely on sinval messages to ensure that our ideas about the size of
* the map aren't too far out of date.
* We might not have opened the relation at the smgr level yet, or we
* might have been forced to close it by a sinval message. The code below
* won't necessarily notice relation extension immediately when extend =
* false, so we rely on sinval messages to ensure that our ideas about the
* size of the map aren't too far out of date.
*/
RelationOpenSmgr(rel);

View File

@ -93,7 +93,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
else
scan->orderByData = NULL;
scan->xs_want_itup = false; /* may be set later */
scan->xs_want_itup = false; /* may be set later */
/*
* During recovery we ignore killed tuples and don't bother to kill them

View File

@ -435,7 +435,7 @@ index_restrpos(IndexScanDesc scan)
ItemPointer
index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
{
FmgrInfo *procedure;
FmgrInfo *procedure;
bool found;
SCAN_CHECKS;
@ -495,7 +495,7 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
HeapTuple
index_fetch_heap(IndexScanDesc scan)
{
ItemPointer tid = &scan->xs_ctup.t_self;
ItemPointer tid = &scan->xs_ctup.t_self;
bool all_dead = false;
bool got_heap_tuple;
@ -530,8 +530,8 @@ index_fetch_heap(IndexScanDesc scan)
if (got_heap_tuple)
{
/*
* Only in a non-MVCC snapshot can more than one member of the
* HOT chain be visible.
* Only in a non-MVCC snapshot can more than one member of the HOT
* chain be visible.
*/
scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot);
pgstat_count_heap_fetch(scan->indexRelation);
@ -544,7 +544,7 @@ index_fetch_heap(IndexScanDesc scan)
/*
* If we scanned a whole HOT chain and found only dead tuples, tell index
* AM to kill its entry for that TID (this will take effect in the next
* amgettuple call, in index_getnext_tid). We do not do this when in
* amgettuple call, in index_getnext_tid). We do not do this when in
* recovery because it may violate MVCC to do so. See comments in
* RelationGetIndexScan().
*/
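In code, that policy reduces to a one-liner after the chain walk (a sketch):

if (!scan->xactStartedInRecovery)
	scan->kill_prior_tuple = all_dead;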

View File

@ -82,7 +82,7 @@ btint2fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btint2sortsupport(PG_FUNCTION_ARGS)
{
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btint2fastcmp;
PG_RETURN_VOID();
@ -119,7 +119,7 @@ btint4fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btint4sortsupport(PG_FUNCTION_ARGS)
{
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btint4fastcmp;
PG_RETURN_VOID();
@ -156,7 +156,7 @@ btint8fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btint8sortsupport(PG_FUNCTION_ARGS)
{
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btint8fastcmp;
PG_RETURN_VOID();
@ -277,7 +277,7 @@ btoidfastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btoidsortsupport(PG_FUNCTION_ARGS)
{
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btoidfastcmp;
PG_RETURN_VOID();
@ -338,7 +338,7 @@ btnamefastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btnamesortsupport(PG_FUNCTION_ARGS)
{
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btnamefastcmp;
PG_RETURN_VOID();
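Each of these support functions merely installs a comparator that avoids fmgr call overhead; the int2 one, for example, is essentially this sketch:

static int
btint2fastcmp(Datum x, Datum y, SortSupport ssup)
{
	int16		a = DatumGetInt16(x);
	int16		b = DatumGetInt16(y);

	if (a > b)
		return 1;
	else if (a == b)
		return 0;
	else
		return -1;
}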

View File

@ -1362,7 +1362,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* we're in VACUUM and would not otherwise have an XID. Having already
* updated links to the target, ReadNewTransactionId() suffices as an
* upper bound. Any scan having retained a now-stale link is advertising
* in its PGXACT an xmin less than or equal to the value we read here. It
* in its PGXACT an xmin less than or equal to the value we read here. It
* will continue to do so, holding back RecentGlobalXmin, for the duration
* of that scan.
*/

View File

@ -433,7 +433,7 @@ btbeginscan(PG_FUNCTION_ARGS)
/*
* We don't know yet whether the scan will be index-only, so we do not
* allocate the tuple workspace arrays until btrescan. However, we set up
* allocate the tuple workspace arrays until btrescan. However, we set up
* scan->xs_itupdesc whether we'll need it or not, since that's so cheap.
*/
so->currTuples = so->markTuples = NULL;
@ -478,7 +478,7 @@ btrescan(PG_FUNCTION_ARGS)
/*
* Allocate tuple workspace arrays, if needed for an index-only scan and
* not already done in a previous rescan call. To save on palloc
* not already done in a previous rescan call. To save on palloc
* overhead, both workspaces are allocated as one palloc block; only this
* function and btendscan know that.
*

View File

@ -564,11 +564,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKeyEntryInitialize(chosen,
(SK_SEARCHNOTNULL | SK_ISNULL |
(impliesNN->sk_flags &
(SK_BT_DESC | SK_BT_NULLS_FIRST))),
(SK_BT_DESC | SK_BT_NULLS_FIRST))),
curattr,
((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
BTGreaterStrategyNumber :
BTLessStrategyNumber),
((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
BTGreaterStrategyNumber :
BTLessStrategyNumber),
InvalidOid,
InvalidOid,
InvalidOid,

View File

@ -37,10 +37,10 @@ typedef struct BTSortArrayContext
static Datum _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
StrategyNumber strat,
Datum *elems, int nelems);
static int _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
static int _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
bool reverse,
Datum *elems, int nelems);
static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
static bool _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
ScanKey leftarg, ScanKey rightarg,
bool *result);
@ -227,8 +227,8 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
}
/*
* Make a scan-lifespan context to hold array-associated data, or reset
* it if we already have one from a previous rescan cycle.
* Make a scan-lifespan context to hold array-associated data, or reset it
* if we already have one from a previous rescan cycle.
*/
if (so->arrayContext == NULL)
so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
continue;
/*
* First, deconstruct the array into elements. Anything allocated
* First, deconstruct the array into elements. Anything allocated
* here (including a possibly detoasted array value) is in the
* workspace context.
*/
@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
&elem_values, &elem_nulls, &num_elems);
/*
* Compress out any null elements. We can ignore them since we assume
* Compress out any null elements. We can ignore them since we assume
* all btree operators are strict.
*/
num_nonnulls = 0;
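/*
 * A sketch of the squeeze this comment describes: since strict operators
 * cannot match a NULL, we drop NULL elements outright.
 */
for (j = 0; j < num_elems; j++)
{
	if (!elem_nulls[j])
		elem_values[num_nonnulls++] = elem_values[j];
}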
@ -338,7 +338,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
* successive primitive indexscans produce data in index order.
*/
num_elems = _bt_sort_array_elements(scan, cur,
(indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
(indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
elem_values, num_nonnulls);
/*
@ -387,9 +387,10 @@ _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
/*
* Look up the appropriate comparison operator in the opfamily.
*
* Note: it's possible that this would fail, if the opfamily is incomplete,
* but it seems quite unlikely that an opfamily would omit non-cross-type
* comparison operators for any datatype that it supports at all.
* Note: it's possible that this would fail, if the opfamily is
* incomplete, but it seems quite unlikely that an opfamily would omit
* non-cross-type comparison operators for any datatype that it supports
* at all.
*/
cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1],
elemtype,
@ -455,9 +456,10 @@ _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
/*
* Look up the appropriate comparison function in the opfamily.
*
* Note: it's possible that this would fail, if the opfamily is incomplete,
* but it seems quite unlikely that an opfamily would omit non-cross-type
* support functions for any datatype that it supports at all.
* Note: it's possible that this would fail, if the opfamily is
* incomplete, but it seems quite unlikely that an opfamily would omit
* non-cross-type support functions for any datatype that it supports at
* all.
*/
cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1],
elemtype,
@ -515,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg)
* _bt_start_array_keys() -- Initialize array keys at start of a scan
*
* Set up the cur_elem counters and fill in the first sk_argument value for
* each array scankey. We can't do this until we know the scan direction.
* each array scankey. We can't do this until we know the scan direction.
*/
void
_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
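{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int			i;

	/*
	 * A sketch of the initialization described above: start each array at
	 * whichever end matches the scan direction, and seed sk_argument from
	 * that element.
	 */
	for (i = 0; i < so->numArrayKeys; i++)
	{
		BTArrayKeyInfo *curArrayKey = &so->arrayKeys[i];
		ScanKey		skey = &so->arrayKeyData[curArrayKey->scan_key];

		if (ScanDirectionIsBackward(dir))
			curArrayKey->cur_elem = curArrayKey->num_elems - 1;
		else
			curArrayKey->cur_elem = 0;
		skey->sk_argument = curArrayKey->elem_values[curArrayKey->cur_elem];
	}
}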
@ -609,8 +611,8 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover which scan keys must be
* satisfied to continue the scan. It also attempts to eliminate redundant
* keys and detect contradictory keys. (If the index opfamily provides
* satisfied to continue the scan. It also attempts to eliminate redundant
* keys and detect contradictory keys. (If the index opfamily provides
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
@ -676,7 +678,7 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
* Note: the reason we have to copy the preprocessed scan keys into private
* storage is that we are modifying the array based on comparisons of the
* key argument values, which could change on a rescan or after moving to
* new elements of array keys. Therefore we can't overwrite the source data.
* new elements of array keys. Therefore we can't overwrite the source data.
*/
void
_bt_preprocess_keys(IndexScanDesc scan)
@ -781,8 +783,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
* set qual_ok to false and abandon further processing.
*
* We also have to deal with the case of "key IS NULL", which is
* unsatisfiable in combination with any other index condition.
* By the time we get here, that's been classified as an equality
* unsatisfiable in combination with any other index condition. By
* the time we get here, that's been classified as an equality
* check, and we've rejected any combination of it with a regular
* equality condition; but not with other types of conditions.
*/
@ -1421,12 +1423,12 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a forward scan, however, we must keep going, because we
* may have initially positioned to the start of the index.
* because it's not possible for any future tuples to pass. On
* a forward scan, however, we must keep going, because we may
* have initially positioned to the start of the index.
*/
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir))
@ -1437,11 +1439,11 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a backward scan, however, we must keep going, because we
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index.
*/
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
@ -1532,12 +1534,12 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a forward scan, however, we must keep going, because we
* may have initially positioned to the start of the index.
* because it's not possible for any future tuples to pass. On
* a forward scan, however, we must keep going, because we may
* have initially positioned to the start of the index.
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir))
@ -1548,11 +1550,11 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
* because it's not possible for any future tuples to pass.
* On a backward scan, however, we must keep going, because we
* because it's not possible for any future tuples to pass. On
* a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index.
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&

View File

@ -24,7 +24,7 @@
/*
* SPPageDesc tracks all info about a page we are inserting into. In some
* situations it actually identifies a tuple, or even a specific node within
* an inner tuple. But any of the fields can be invalid. If the buffer
* an inner tuple. But any of the fields can be invalid. If the buffer
* field is valid, it implies we hold pin and exclusive lock on that buffer.
* The page pointer should be valid exactly when buffer is.
*/
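The struct being described is compact; a sketch of its declaration:

typedef struct SPPageDesc
{
	BlockNumber blkno;			/* block number, or InvalidBlockNumber */
	Buffer		buffer;			/* page's buffer, or InvalidBuffer */
	Page		page;			/* pointer to page buffer, or NULL */
	OffsetNumber offnum;		/* offset of tuple, or InvalidOffsetNumber */
	int			node;			/* node number within inner tuple, or -1 */
} SPPageDesc;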
@ -129,8 +129,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,
int firststate, int reststate,
BlockNumber blkno, OffsetNumber offnum)
{
OffsetNumber firstItem;
OffsetNumber *sortednos;
OffsetNumber firstItem;
OffsetNumber *sortednos;
SpGistDeadTuple tuple = NULL;
int i;
@ -155,8 +155,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,
for (i = 0; i < nitems; i++)
{
OffsetNumber itemno = sortednos[i];
int tupstate;
OffsetNumber itemno = sortednos[i];
int tupstate;
tupstate = (itemno == firstItem) ? firststate : reststate;
if (tuple == NULL || tuple->tupstate != tupstate)
@ -200,7 +200,7 @@ saveNodeLink(Relation index, SPPageDesc *parent,
*/
static void
addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
{
XLogRecData rdata[4];
spgxlogAddLeaf xlrec;
@ -230,7 +230,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
/* Tuple is not part of a chain */
leafTuple->nextOffset = InvalidOffsetNumber;
current->offnum = SpGistPageAddNewItem(state, current->page,
(Item) leafTuple, leafTuple->size,
(Item) leafTuple, leafTuple->size,
NULL, false);
xlrec.offnumLeaf = current->offnum;
@ -250,9 +250,9 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
else
{
/*
* Tuple must be inserted into existing chain. We mustn't change
* the chain's head address, but we don't need to chase the entire
* chain to put the tuple at the end; we can insert it second.
* Tuple must be inserted into existing chain. We mustn't change the
* chain's head address, but we don't need to chase the entire chain
* to put the tuple at the end; we can insert it second.
*
* Also, it's possible that the "chain" consists only of a DEAD tuple,
* in which case we should replace the DEAD tuple in-place.
@ -261,7 +261,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
OffsetNumber offnum;
head = (SpGistLeafTuple) PageGetItem(current->page,
PageGetItemId(current->page, current->offnum));
PageGetItemId(current->page, current->offnum));
if (head->tupstate == SPGIST_LIVE)
{
leafTuple->nextOffset = head->nextOffset;
@ -274,7 +274,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
* and set new second element
*/
head = (SpGistLeafTuple) PageGetItem(current->page,
PageGetItemId(current->page, current->offnum));
PageGetItemId(current->page, current->offnum));
head->nextOffset = offnum;
xlrec.offnumLeaf = offnum;
@ -483,7 +483,7 @@ moveLeafs(Relation index, SpGistState *state,
for (i = 0; i < nDelete; i++)
{
it = (SpGistLeafTuple) PageGetItem(current->page,
PageGetItemId(current->page, toDelete[i]));
PageGetItemId(current->page, toDelete[i]));
Assert(it->tupstate == SPGIST_LIVE);
/*
@ -516,12 +516,12 @@ moveLeafs(Relation index, SpGistState *state,
leafptr += newLeafTuple->size;
/*
* Now delete the old tuples, leaving a redirection pointer behind for
* the first one, unless we're doing an index build; in which case there
* can't be any concurrent scan so we need not provide a redirect.
* Now delete the old tuples, leaving a redirection pointer behind for the
* first one, unless we're doing an index build; in which case there can't
* be any concurrent scan so we need not provide a redirect.
*/
spgPageIndexMultiDelete(state, current->page, toDelete, nDelete,
state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
SPGIST_PLACEHOLDER,
nblkno, r);
@ -575,7 +575,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position,
SpGistDeadTuple dt;
dt = (SpGistDeadTuple) PageGetItem(current->page,
PageGetItemId(current->page, position));
PageGetItemId(current->page, position));
Assert(dt->tupstate == SPGIST_REDIRECT);
Assert(ItemPointerGetBlockNumber(&dt->pointer) == SPGIST_METAPAGE_BLKNO);
ItemPointerSet(&dt->pointer, blkno, offnum);
@ -640,7 +640,7 @@ checkAllTheSame(spgPickSplitIn *in, spgPickSplitOut *out, bool tooBig,
/* The opclass may not use node labels, but if it does, duplicate 'em */
if (out->nodeLabels)
{
Datum theLabel = out->nodeLabels[theNode];
Datum theLabel = out->nodeLabels[theNode];
out->nodeLabels = (Datum *) palloc(sizeof(Datum) * out->nNodes);
for (i = 0; i < out->nNodes; i++)
@ -754,8 +754,8 @@ doPickSplit(Relation index, SpGistState *state,
{
/*
* We are splitting the root (which up to now is also a leaf page).
* Its tuples are not linked, so scan sequentially to get them all.
* We ignore the original value of current->offnum.
* Its tuples are not linked, so scan sequentially to get them all. We
* ignore the original value of current->offnum.
*/
for (i = FirstOffsetNumber; i <= max; i++)
{
@ -773,7 +773,7 @@ doPickSplit(Relation index, SpGistState *state,
/* we will delete the tuple altogether, so count full space */
spaceToDelete += it->size + sizeof(ItemIdData);
}
else /* tuples on root should be live */
else /* tuples on root should be live */
elog(ERROR, "unexpected SPGiST tuple state: %d", it->tupstate);
}
}
@ -820,7 +820,7 @@ doPickSplit(Relation index, SpGistState *state,
* We may not actually insert new tuple because another picksplit may be
* necessary due to too large value, but we will try to allocate enough
* space to include it; and in any case it has to be included in the input
* for the picksplit function. So don't increment nToInsert yet.
* for the picksplit function. So don't increment nToInsert yet.
*/
in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@ -878,7 +878,7 @@ doPickSplit(Relation index, SpGistState *state,
/*
* Check to see if the picksplit function failed to separate the values,
* ie, it put them all into the same child node. If so, select allTheSame
* mode and create a random split instead. See comments for
* mode and create a random split instead. See comments for
* checkAllTheSame as to why we need to know if the new leaf tuples could
* fit on one page.
*/
@ -924,8 +924,8 @@ doPickSplit(Relation index, SpGistState *state,
innerTuple->allTheSame = allTheSame;
/*
* Update nodes[] array to point into the newly formed innerTuple, so
* that we can adjust their downlinks below.
* Update nodes[] array to point into the newly formed innerTuple, so that
* we can adjust their downlinks below.
*/
SGITITERATE(innerTuple, i, node)
{
@ -944,13 +944,13 @@ doPickSplit(Relation index, SpGistState *state,
}
/*
* To perform the split, we must insert a new inner tuple, which can't
* go on a leaf page; and unless we are splitting the root page, we
* must then update the parent tuple's downlink to point to the inner
* tuple. If there is room, we'll put the new inner tuple on the same
* page as the parent tuple, otherwise we need another non-leaf buffer.
* But if the parent page is the root, we can't add the new inner tuple
* there, because the root page must have only one inner tuple.
* To perform the split, we must insert a new inner tuple, which can't go
* on a leaf page; and unless we are splitting the root page, we must then
* update the parent tuple's downlink to point to the inner tuple. If
* there is room, we'll put the new inner tuple on the same page as the
* parent tuple, otherwise we need another non-leaf buffer. But if the
* parent page is the root, we can't add the new inner tuple there,
* because the root page must have only one inner tuple.
*/
xlrec.initInner = false;
if (parent->buffer != InvalidBuffer &&
@ -965,9 +965,9 @@ doPickSplit(Relation index, SpGistState *state,
{
/* Send tuple to page with next triple parity (see README) */
newInnerBuffer = SpGistGetBuffer(index,
GBUF_INNER_PARITY(parent->blkno + 1) |
GBUF_INNER_PARITY(parent->blkno + 1) |
(isNulls ? GBUF_NULLS : 0),
innerTuple->size + sizeof(ItemIdData),
innerTuple->size + sizeof(ItemIdData),
&xlrec.initInner);
}
else
@ -977,22 +977,22 @@ doPickSplit(Relation index, SpGistState *state,
}
/*
* Because a WAL record can't involve more than four buffers, we can
* only afford to deal with two leaf pages in each picksplit action,
* ie the current page and at most one other.
* Because a WAL record can't involve more than four buffers, we can only
* afford to deal with two leaf pages in each picksplit action, ie the
* current page and at most one other.
*
* The new leaf tuples converted from the existing ones should require
* the same or less space, and therefore should all fit onto one page
* The new leaf tuples converted from the existing ones should require the
* same or less space, and therefore should all fit onto one page
* (although that's not necessarily the current page, since we can't
* delete the old tuples but only replace them with placeholders).
* However, the incoming new tuple might not also fit, in which case
* we might need another picksplit cycle to reduce it some more.
* However, the incoming new tuple might not also fit, in which case we
* might need another picksplit cycle to reduce it some more.
*
* If there's not room to put everything back onto the current page,
* then we decide on a per-node basis which tuples go to the new page.
* (We do it like that because leaf tuple chains can't cross pages,
* so we must place all leaf tuples belonging to the same parent node
* on the same page.)
* If there's not room to put everything back onto the current page, then
* we decide on a per-node basis which tuples go to the new page. (We do
* it like that because leaf tuple chains can't cross pages, so we must
* place all leaf tuples belonging to the same parent node on the same
* page.)
*
* If we are splitting the root page (turning it from a leaf page into an
* inner page), then no leaf tuples can go back to the current page; they
@ -1037,12 +1037,13 @@ doPickSplit(Relation index, SpGistState *state,
int newspace;
newLeafBuffer = SpGistGetBuffer(index,
GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
Min(totalLeafSizes,
SPGIST_PAGE_CAPACITY),
&xlrec.initDest);
/*
* Attempt to assign node groups to the two pages. We might fail to
* Attempt to assign node groups to the two pages. We might fail to
* do so, even if totalLeafSizes is less than the available space,
* because we can't split a group across pages.
*/
@ -1054,12 +1055,12 @@ doPickSplit(Relation index, SpGistState *state,
{
if (leafSizes[i] <= curspace)
{
nodePageSelect[i] = 0; /* signifies current page */
nodePageSelect[i] = 0; /* signifies current page */
curspace -= leafSizes[i];
}
else
{
nodePageSelect[i] = 1; /* signifies new leaf page */
nodePageSelect[i] = 1; /* signifies new leaf page */
newspace -= leafSizes[i];
}
}
@ -1075,7 +1076,7 @@ doPickSplit(Relation index, SpGistState *state,
else if (includeNew)
{
/* We must exclude the new leaf tuple from the split */
int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];
int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];
leafSizes[nodeOfNewTuple] -=
newLeafs[in.nTuples - 1]->size + sizeof(ItemIdData);
@ -1087,12 +1088,12 @@ doPickSplit(Relation index, SpGistState *state,
{
if (leafSizes[i] <= curspace)
{
nodePageSelect[i] = 0; /* signifies current page */
nodePageSelect[i] = 0; /* signifies current page */
curspace -= leafSizes[i];
}
else
{
nodePageSelect[i] = 1; /* signifies new leaf page */
nodePageSelect[i] = 1; /* signifies new leaf page */
newspace -= leafSizes[i];
}
}
@ -1204,7 +1205,7 @@ doPickSplit(Relation index, SpGistState *state,
for (i = 0; i < nToInsert; i++)
{
SpGistLeafTuple it = newLeafs[i];
Buffer leafBuffer;
Buffer leafBuffer;
BlockNumber leafBlock;
OffsetNumber newoffset;
@ -1584,12 +1585,12 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.nodeI = parent->node;
/*
* obtain new buffer with the same parity as current, since it will
* be a child of same parent tuple
* obtain new buffer with the same parity as current, since it will be
* a child of same parent tuple
*/
current->buffer = SpGistGetBuffer(index,
GBUF_INNER_PARITY(current->blkno),
newInnerTuple->size + sizeof(ItemIdData),
newInnerTuple->size + sizeof(ItemIdData),
&xlrec.newPage);
current->blkno = BufferGetBlockNumber(current->buffer);
current->page = BufferGetPage(current->buffer);
@ -1597,15 +1598,15 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.blknoNew = current->blkno;
/*
* Let's just make real sure new current isn't same as old. Right
* now that's impossible, but if SpGistGetBuffer ever got smart enough
* to delete placeholder tuples before checking space, maybe it
* wouldn't be impossible. The case would appear to work except that
* WAL replay would be subtly wrong, so I think a mere assert isn't
* enough here.
* Let's just make real sure new current isn't same as old. Right now
* that's impossible, but if SpGistGetBuffer ever got smart enough to
* delete placeholder tuples before checking space, maybe it wouldn't
* be impossible. The case would appear to work except that WAL
* replay would be subtly wrong, so I think a mere assert isn't enough
* here.
*/
if (xlrec.blknoNew == xlrec.blkno)
elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
if (xlrec.blknoNew == xlrec.blkno)
elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
/*
* New current and parent buffer will both be modified; but note that
@ -1707,9 +1708,9 @@ spgSplitNodeAction(Relation index, SpGistState *state,
Assert(!SpGistPageStoresNulls(current->page));
/*
* Construct new prefix tuple, containing a single node with the
* specified label. (We'll update the node's downlink to point to the
* new postfix tuple, below.)
* Construct new prefix tuple, containing a single node with the specified
* label. (We'll update the node's downlink to point to the new postfix
* tuple, below.)
*/
node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false);
@ -1888,9 +1889,9 @@ spgdoinsert(Relation index, SpGistState *state,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
(unsigned long) (leafSize - sizeof(ItemIdData)),
(unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
(unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
RelationGetRelationName(index)),
errhint("Values larger than a buffer page cannot be indexed.")));
errhint("Values larger than a buffer page cannot be indexed.")));
/* Initialize "current" to the appropriate root page */
current.blkno = isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO;
@ -1920,7 +1921,7 @@ spgdoinsert(Relation index, SpGistState *state,
if (current.blkno == InvalidBlockNumber)
{
/*
* Create a leaf page. If leafSize is too large to fit on a page,
* Create a leaf page. If leafSize is too large to fit on a page,
* we won't actually use the page yet, but it simplifies the API
* for doPickSplit to always have a leaf page at hand; so just
* quietly limit our request to a page size.
@ -1968,7 +1969,7 @@ spgdoinsert(Relation index, SpGistState *state,
}
else if ((sizeToSplit =
checkSplitConditions(index, state, &current,
&nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
&nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
nToSplit < 64 &&
leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= SPGIST_PAGE_CAPACITY)
{
@ -2077,8 +2078,8 @@ spgdoinsert(Relation index, SpGistState *state,
}
/*
* Loop around and attempt to insert the new leafDatum
* at "current" (which might reference an existing child
* Loop around and attempt to insert the new leafDatum at
* "current" (which might reference an existing child
* tuple, or might be invalid to force us to find a new
* page for the tuple).
*
@ -2102,8 +2103,8 @@ spgdoinsert(Relation index, SpGistState *state,
out.result.addNode.nodeLabel);
/*
* Retry insertion into the enlarged node. We assume
* that we'll get a MatchNode result this time.
* Retry insertion into the enlarged node. We assume that
* we'll get a MatchNode result this time.
*/
goto process_inner_tuple;
break;
@ -123,7 +123,7 @@ spgbuild(PG_FUNCTION_ARGS)
buildstate.spgstate.isBuild = true;
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
"SP-GiST build temporary context",
"SP-GiST build temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
@ -135,12 +135,12 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)
/*
* Note: points that have coordinates exactly equal to coord may get
* classified into either node, depending on where they happen to fall
* in the sorted list. This is okay as long as the inner_consistent
* function descends into both sides for such cases. This is better
* than the alternative of trying to have an exact boundary, because
* it keeps the tree balanced even when we have many instances of the
* same point value. So we should never trigger the allTheSame logic.
* classified into either node, depending on where they happen to fall in
* the sorted list. This is okay as long as the inner_consistent function
* descends into both sides for such cases. This is better than the
* alternative of trying to have an exact boundary, because it keeps the
* tree balanced even when we have many instances of the same point value.
* So we should never trigger the allTheSame logic.
*/
for (i = 0; i < in->nTuples; i++)
{
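To make the policy in this comment concrete, here is a minimal, self-contained sketch (hypothetical names, not the spg_kd_picksplit code): the sorted list is cut at its midpoint, and a search descends into both halves whenever the query coordinate equals the boundary.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the picksplit policy described above:
 * values equal to the boundary may end up on either side, so the
 * search must descend into both sides on equality. */
static int
cmp_double(const void *a, const void *b)
{
    double da = *(const double *) a;
    double db = *(const double *) b;

    return (da > db) - (da < db);
}

int
main(void)
{
    double vals[] = {3.0, 1.0, 2.0, 2.0, 2.0, 4.0};
    int n = 6;
    double query = 2.0;
    double coord;

    qsort(vals, n, sizeof(double), cmp_double);
    coord = vals[n / 2];        /* median becomes the split boundary */

    /* Descend left if query <= coord, right if query >= coord: on
     * equality both sides are visited, which is what keeps the tree
     * correct when duplicates straddle the boundary. */
    if (query <= coord)
        printf("visit left of %g\n", coord);
    if (query >= coord)
        printf("visit right of %g\n", coord);
    return 0;
}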
@ -253,8 +253,8 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS)
boxQuery = DatumGetBoxP(in->scankeys[i].sk_argument);
if (DatumGetBool(DirectFunctionCall2(box_contain_pt,
PointerGetDatum(boxQuery),
PointerGetDatum(centroid))))
{
/* centroid is in box, so all quadrants are OK */
}
@ -24,7 +24,7 @@
typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr,
Datum leafValue, bool isnull, bool recheck);
typedef struct ScanStackEntry
{
@ -88,7 +88,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
if (so->want_itup)
{
/* Must pfree IndexTuples to avoid memory leak */
int i;
for (i = 0; i < so->nPtrs; i++)
pfree(so->indexTups[i]);
@ -102,7 +102,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
* Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
*
* The point here is to eliminate null-related considerations from what the
* opclass consistent functions need to deal with. We assume all SPGiST-
* indexable operators are strict, so any null RHS value makes the scan
* condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL
* conditions; their effect is reflected into searchNulls/searchNonNulls.
@ -177,6 +177,7 @@ spgbeginscan(PG_FUNCTION_ARGS)
{
Relation rel = (Relation) PG_GETARG_POINTER(0);
int keysz = PG_GETARG_INT32(1);
/* ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); */
IndexScanDesc scan;
SpGistScanOpaque so;
@ -457,7 +458,7 @@ redirect:
MemoryContext oldCtx;
innerTuple = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, offset));
if (innerTuple->tupstate != SPGIST_LIVE)
{
@ -522,7 +523,7 @@ redirect:
for (i = 0; i < out.nNodes; i++)
{
int nodeN = out.nodeNumbers[i];
Assert(nodeN >= 0 && nodeN < in.nNodes);
if (ItemPointerIsValid(&nodes[nodeN]->t_tid))
@ -598,7 +599,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
if (so->want_itup)
{
/*
* Reconstruct desired IndexTuple. We have to copy the datum out of
* the temp context anyway, so we may as well create the tuple here.
*/
so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,
@ -636,7 +637,7 @@ spggettuple(PG_FUNCTION_ARGS)
if (so->want_itup)
{
/* Must pfree IndexTuples to avoid memory leak */
int i;
for (i = 0; i < so->nPtrs; i++)
pfree(so->indexTups[i]);
@ -26,7 +26,7 @@
* In the worst case, an inner tuple in a text suffix tree could have as many
* as 256 nodes (one for each possible byte value). Each node can take 16
* bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page
* of size BLCKSZ. Rather than assuming we know the exact amount of overhead
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). The upshot is that the maximum safe prefix
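A worked instance of this arithmetic, as a sketch assuming the default BLCKSZ of 8192 (the macro name mirrors the one in this file, but treat the figures as illustrative):

#include <stdio.h>

#define BLCKSZ 8192     /* assumed default page size */
#define SPGIST_MAX_PREFIX_LENGTH (BLCKSZ - 256 * 16 - 100)

int
main(void)
{
    /* 8192 - 4096 (256 nodes x 16 bytes) - 100 (overhead slop) = 3996 */
    printf("max safe prefix: %d bytes\n", SPGIST_MAX_PREFIX_LENGTH);
    return 0;
}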
@ -209,9 +209,9 @@ spg_text_choose(PG_FUNCTION_ARGS)
{
/*
* Descend to existing node. (If in->allTheSame, the core code will
* ignore our nodeN specification here, but that's OK. We still
* have to provide the correct levelAdd and restDatum values, and
* those are the same regardless of which node gets chosen by core.)
* ignore our nodeN specification here, but that's OK. We still have
* to provide the correct levelAdd and restDatum values, and those are
* the same regardless of which node gets chosen by core.)
*/
out->resultType = spgMatchNode;
out->result.matchNode.nodeN = i;
@ -227,10 +227,10 @@ spg_text_choose(PG_FUNCTION_ARGS)
else if (in->allTheSame)
{
/*
* Can't use AddNode action, so split the tuple. The upper tuple
* has the same prefix as before and uses an empty node label for
* the lower tuple. The lower tuple has no prefix and the same
* node labels as the original tuple.
* Can't use AddNode action, so split the tuple. The upper tuple has
* the same prefix as before and uses an empty node label for the
* lower tuple. The lower tuple has no prefix and the same node
* labels as the original tuple.
*/
out->resultType = spgSplitTuple;
out->result.splitTuple.prefixHasPrefix = in->hasPrefix;
@ -315,13 +315,13 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
if (commonLen < VARSIZE_ANY_EXHDR(texti))
nodes[i].c = *(uint8 *) (VARDATA_ANY(texti) + commonLen);
else
nodes[i].c = '\0'; /* use \0 if string is all common */
nodes[i].i = i;
nodes[i].d = in->datums[i];
}
/*
* Sort by label bytes so that we can group the values into nodes. This
* also ensures that the nodes are ordered by label value, allowing the
* use of binary search in searchChar.
*/
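A minimal sketch of the sort-then-binary-search arrangement described here, with hypothetical stand-in types rather than the real node structures:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Nodes sorted by one-byte label, then located with binary search;
 * a miniature of the scheme, not the actual spgtextproc.c code. */
typedef struct
{
    uint8_t c;      /* label byte */
    int     i;      /* original position */
} LabelNode;

static int
cmp_label(const void *a, const void *b)
{
    return (int) ((const LabelNode *) a)->c -
           (int) ((const LabelNode *) b)->c;
}

static int
searchChar(const LabelNode *nodes, int n, uint8_t c)
{
    int lo = 0, hi = n - 1;

    while (lo <= hi)
    {
        int mid = (lo + hi) / 2;

        if (nodes[mid].c == c)
            return mid;
        if (nodes[mid].c < c)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return -1;
}

int
main(void)
{
    LabelNode nodes[] = {{'t', 0}, {'a', 1}, {'m', 2}};

    qsort(nodes, 3, sizeof(LabelNode), cmp_label);
    printf("'m' found at sorted slot %d\n", searchChar(nodes, 3, 'm'));
    return 0;
}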
@ -371,7 +371,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
/*
* Reconstruct values represented at this tuple, including parent data,
* prefix of this tuple if any, and the node label if any. in->level
* should be the length of the previously reconstructed value, and the
* number of bytes added here is prefixSize or prefixSize + 1.
*
@ -381,7 +381,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
* long-format reconstructed values.
*/
Assert(in->level == 0 ? DatumGetPointer(in->reconstructedValue) == NULL :
VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level);
maxReconstrLen = in->level + 1;
if (in->hasPrefix)
@ -530,7 +530,7 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
}
else
{
text *fullText = palloc(VARHDRSZ + fullLen);
SET_VARSIZE(fullText, VARHDRSZ + fullLen);
fullValue = VARDATA(fullText);
@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When requesting an inner page, if we get one with the wrong parity,
* we just release the buffer and try again. We will get a different page
* because GetFreeIndexPage will have marked the page used in FSM. The page
* is entered in our local lastUsedPages cache, so there's some hope of
* making use of it later in this session, but otherwise we rely on VACUUM
* to eventually re-enter the page in FSM, making it available for recycling.
@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When we return a buffer to the caller, the page is *not* entered into
* the lastUsedPages cache; we expect the caller will do so after it's taken
* whatever space it will use. This is because after the caller has used up
* some space, the page might have less space than whatever was cached already
* so we'd rather not trash the old cache entry.
*/
@ -275,7 +275,7 @@ allocNewBuffer(Relation index, int flags)
else
{
BlockNumber blkno = BufferGetBlockNumber(buffer);
int blkFlags = GBUF_INNER_PARITY(blkno);
if ((flags & GBUF_PARITY_MASK) == blkFlags)
{
@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
/*
* If possible, increase the space request to include relation's
* fillfactor. This ensures that when we add unrelated tuples to a page,
* we try to keep 100-fillfactor% available for adding tuples that are
* related to the ones already on it. But fillfactor mustn't cause an
* error for requests that would otherwise be legal.
@ -664,7 +664,7 @@ spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix,
errmsg("SPGiST inner tuple size %lu exceeds maximum %lu",
(unsigned long) size,
(unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData))),
errhint("Values larger than a buffer page cannot be indexed.")));
errhint("Values larger than a buffer page cannot be indexed.")));
/*
* Check for overflow of header fields --- probably can't fail if the
@ -801,7 +801,7 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size,
for (; i <= maxoff; i++)
{
SpGistDeadTuple it = (SpGistDeadTuple) PageGetItem(page,
PageGetItemId(page, i));
if (it->tupstate == SPGIST_PLACEHOLDER)
{
@ -31,8 +31,8 @@
/* Entry in pending-list of TIDs we need to revisit */
typedef struct spgVacPendingItem
{
ItemPointerData tid; /* redirection target to visit */
bool done; /* have we dealt with this? */
struct spgVacPendingItem *next; /* list link */
} spgVacPendingItem;
@ -46,10 +46,10 @@ typedef struct spgBulkDeleteState
void *callback_state;
/* Additional working state */
SpGistState spgstate; /* for SPGiST operations that need one */
spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
TransactionId myXmin; /* for detecting newly-added redirects */
TransactionId OldestXmin; /* for deciding a redirect is obsolete */
BlockNumber lastFilledBlock; /* last non-deletable block */
} spgBulkDeleteState;
@ -213,7 +213,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* Figure out exactly what we have to do. We do this separately from
* actually modifying the page, mainly so that we have a representation
* that can be dumped into WAL and then the replay code can do exactly
* the same thing. The output of this step consists of six arrays
* describing four kinds of operations, to be performed in this order:
*
* toDead[]: tuple numbers to be replaced with DEAD tuples
@ -276,8 +276,8 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else if (prevLive == InvalidOffsetNumber)
{
/*
* This is the first live tuple in the chain. It has
* to move to the head position.
* This is the first live tuple in the chain. It has to move
* to the head position.
*/
moveSrc[xlrec.nMove] = j;
moveDest[xlrec.nMove] = i;
@ -289,7 +289,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else
{
/*
* Second or later live tuple. Arrange to re-chain it to the
* previous live one, if there was a gap.
*/
if (interveningDeletable)
@ -353,11 +353,11 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
InvalidBlockNumber, InvalidOffsetNumber);
/*
* We implement the move step by swapping the item pointers of the
* source and target tuples, then replacing the newly-source tuples
* with placeholders. This is perhaps unduly friendly with the page
* data representation, but it's fast and doesn't risk page overflow
* when a tuple to be relocated is large.
* We implement the move step by swapping the item pointers of the source
* and target tuples, then replacing the newly-source tuples with
* placeholders. This is perhaps unduly friendly with the page data
* representation, but it's fast and doesn't risk page overflow when a
* tuple to be relocated is large.
*/
for (i = 0; i < xlrec.nMove; i++)
{
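The move step can be pictured with this toy model (hypothetical structures; a real page is a byte array with line pointers): swap the pointers of source and target slots, then turn the tuple left behind in the source slot into a placeholder.

#include <stdio.h>

/* Line pointers (linp) index into a tuple array; after the swap, the
 * tuple now referenced from the source slot is overwritten with a
 * placeholder. A sketch only, not the real page representation. */
enum { LIVE, PLACEHOLDER };

typedef struct { int state; int payload; } Tup;

int
main(void)
{
    Tup  tup[4] = {{LIVE, 100}, {LIVE, 101}, {LIVE, 102}, {LIVE, 103}};
    int  linp[4] = {0, 1, 2, 3};
    int  moveSrc[1] = {3};      /* slot holding the tuple to relocate */
    int  moveDest[1] = {1};     /* slot it should end up in */
    int  i;

    for (i = 0; i < 1; i++)
    {
        int tmp = linp[moveDest[i]];

        linp[moveDest[i]] = linp[moveSrc[i]];
        linp[moveSrc[i]] = tmp;
        /* the tuple left behind in the source slot becomes a placeholder */
        tup[linp[moveSrc[i]]].state = PLACEHOLDER;
    }
    for (i = 0; i < 4; i++)
        printf("slot %d: payload %d state %d\n",
               i, tup[linp[i]].payload, tup[linp[i]].state);
    return 0;
}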
@ -518,7 +518,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer,
*/
for (i = max;
i >= FirstOffsetNumber &&
(opaque->nRedirection > 0 || !hasNonPlaceholder);
i--)
{
SpGistDeadTuple dt;
@ -651,9 +651,9 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno)
/*
* The root pages must never be deleted, nor marked as available in FSM,
* because we don't want them ever returned by a search for a place to
* put a new tuple. Otherwise, check for empty/deletable page, and
* make sure FSM knows about it.
* because we don't want them ever returned by a search for a place to put
* a new tuple. Otherwise, check for empty/deletable page, and make sure
* FSM knows about it.
*/
if (!SpGistBlockIsRoot(blkno))
{
@ -688,7 +688,7 @@ spgprocesspending(spgBulkDeleteState *bds)
Relation index = bds->info->index;
spgVacPendingItem *pitem;
spgVacPendingItem *nitem;
BlockNumber blkno;
Buffer buffer;
Page page;
@ -741,11 +741,11 @@ spgprocesspending(spgBulkDeleteState *bds)
else
{
/*
* On an inner page, visit the referenced inner tuple and add
* all its downlinks to the pending list. We might have pending
* items for more than one inner tuple on the same page (in fact
* this is pretty likely given the way space allocation works),
* so get them all while we are here.
* On an inner page, visit the referenced inner tuple and add all
* its downlinks to the pending list. We might have pending items
* for more than one inner tuple on the same page (in fact this is
* pretty likely given the way space allocation works), so get
* them all while we are here.
*/
for (nitem = pitem; nitem != NULL; nitem = nitem->next)
{
@ -774,7 +774,7 @@ spgprocesspending(spgBulkDeleteState *bds)
{
/* transfer attention to redirect point */
spgAddPendingTID(bds,
&((SpGistDeadTuple) innerTuple)->pointer);
&((SpGistDeadTuple) innerTuple)->pointer);
}
else
elog(ERROR, "unexpected SPGiST tuple state: %d",
@ -825,8 +825,8 @@ spgvacuumscan(spgBulkDeleteState *bds)
* physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
* delete some deletable tuples. See more extensive comments about
* this in btvacuumscan().
* delete some deletable tuples. See more extensive comments about this
* in btvacuumscan().
*/
blkno = SPGIST_METAPAGE_BLKNO + 1;
for (;;)
@ -40,7 +40,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
}
/*
* Add a leaf tuple, or replace an existing placeholder tuple. This is used
* to replay SpGistPageAddNewItem() operations. If the offset points at an
* existing tuple, it had better be a placeholder tuple.
*/
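A toy rendering of that add-or-replace contract, assuming a hypothetical array-based page rather than the real page API:

#include <stdio.h>

/* If the offset already exists it must currently hold a placeholder,
 * which gets overwritten in place; otherwise the item is appended. */
enum { EMPTY, PLACEHOLDER, LIVE };

#define MAXOFF 8

static int page[MAXOFF];    /* tuple states per offset */
static int maxoff = 3;      /* offsets 0..2 in use */

static void
add_or_replace(int offset)
{
    if (offset < maxoff)
    {
        if (page[offset] != PLACEHOLDER)
        {
            printf("error: tuple to be replaced is not a placeholder\n");
            return;
        }
        page[offset] = LIVE;    /* replace placeholder in place */
    }
    else
        page[maxoff++] = LIVE;  /* append at the end */
}

int
main(void)
{
    page[0] = LIVE;
    page[1] = PLACEHOLDER;
    page[2] = LIVE;
    add_or_replace(1);  /* replaces the placeholder */
    add_or_replace(3);  /* appends a new tuple */
    printf("maxoff now %d, slot 1 state %d\n", maxoff, page[1]);
    return 0;
}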
@ -50,7 +50,7 @@ addOrReplaceTuple(Page page, Item tuple, int size, OffsetNumber offset)
if (offset <= PageGetMaxOffsetNumber(page))
{
SpGistDeadTuple dt = (SpGistDeadTuple) PageGetItem(page,
PageGetItemId(page, offset));
if (dt->tupstate != SPGIST_PLACEHOLDER)
elog(ERROR, "SPGiST tuple to be replaced is not a placeholder");
@ -126,7 +126,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
if (xldata->newPage)
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@ -143,7 +143,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
SpGistLeafTuple head;
head = (SpGistLeafTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumHeadLeaf));
Assert(head->nextOffset == leafTuple->nextOffset);
head->nextOffset = xldata->offnumLeaf;
}
@ -154,7 +154,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
PageIndexTupleDelete(page, xldata->offnumLeaf);
if (PageAddItem(page,
(Item) leafTuple, leafTuple->size,
xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
leafTuple->size);
}
@ -180,7 +180,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple tuple;
tuple = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(tuple, xldata->nodeI,
xldata->blknoLeaf, xldata->offnumLeaf);
@ -229,7 +229,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
if (xldata->newPage)
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@ -261,7 +261,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
if (!XLByteLE(lsn, PageGetLSN(page)))
{
spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
SPGIST_PLACEHOLDER,
xldata->blknoDst,
toInsert[nInsert - 1]);
@ -286,7 +286,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple tuple;
tuple = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(tuple, xldata->nodeI,
xldata->blknoDst, toInsert[nInsert - 1]);
@ -413,7 +413,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
}
/*
* Update parent downlink. Since parent could be in either of the
* previous two buffers, it's a bit tricky to determine which BKP bit
* applies.
*/
@ -435,7 +435,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple innerTuple;
innerTuple = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(innerTuple, xldata->nodeI,
xldata->blknoNew, xldata->offnumNew);
@ -504,7 +504,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
{
PageIndexTupleDelete(page, xldata->offnumPrefix);
if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size,
xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
prefixTuple->size);
@ -571,7 +571,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
page = (Page) BufferGetPage(srcBuffer);
SpGistInitBuffer(srcBuffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
/* don't update LSN etc till we're done with it */
}
else
@ -587,8 +587,8 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
{
/*
* We have it a bit easier here than in doPickSplit(),
* because we know the inner tuple's location already,
* so we can inject the correct redirection tuple now.
* because we know the inner tuple's location already, so
* we can inject the correct redirection tuple now.
*/
if (!state.isBuild)
spgPageIndexMultiDelete(&state, page,
@ -627,7 +627,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
page = (Page) BufferGetPage(destBuffer);
SpGistInitBuffer(destBuffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
/* don't update LSN etc till we're done with it */
}
else
@ -707,9 +707,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple parent;
parent = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(parent, xldata->nodeI,
xldata->blknoInner, xldata->offnumInner);
}
PageSetLSN(page, lsn);
@ -742,9 +742,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple parent;
parent = (SpGistInnerTuple) PageGetItem(page,
PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(parent, xldata->nodeI,
xldata->blknoInner, xldata->offnumInner);
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
@ -803,7 +803,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
spgPageIndexMultiDelete(&state, page,
toPlaceholder, xldata->nPlaceholder,
SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
InvalidBlockNumber,
InvalidOffsetNumber);
@ -821,7 +821,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
spgPageIndexMultiDelete(&state, page,
moveSrc, xldata->nMove,
SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
InvalidBlockNumber,
InvalidOffsetNumber);
@ -906,7 +906,7 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
SpGistDeadTuple dt;
dt = (SpGistDeadTuple) PageGetItem(page,
PageGetItemId(page, itemToPlaceholder[i]));
Assert(dt->tupstate == SPGIST_REDIRECT);
dt->tupstate = SPGIST_PLACEHOLDER;
ItemPointerSetInvalid(&dt->pointer);
@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
* Testing during the PostgreSQL 9.2 development cycle revealed that on a
* large multi-processor system, it was possible to have more CLOG page
requests in flight at one time than the number of CLOG buffers which existed
* at that time, which was hardcoded to 8. Further testing revealed that
* performance dropped off with more than 32 CLOG buffers, possibly because
* the linear buffer search algorithm doesn't scale well.
*
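For illustration, a cap-and-floor sizing rule of the kind this comment motivates might look like the sketch below: scale with shared_buffers, never below the old fixed 8, never above 32 where the linear search stops paying off. The divisor is an assumption, not necessarily what clog.c settled on.

#include <stdio.h>

#define Min(x, y) ((x) < (y) ? (x) : (y))
#define Max(x, y) ((x) > (y) ? (x) : (y))

static int
clog_buffers_for(int NBuffers)
{
    return Min(32, Max(8, NBuffers / 512));
}

int
main(void)
{
    printf("%d\n", clog_buffers_for(1024));    /* small cluster: floor of 8 */
    printf("%d\n", clog_buffers_for(131072));  /* 1GB shared_buffers: capped at 32 */
    return 0;
}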
@ -903,12 +903,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
int slotno;
int cur_count;
int bestvalidslot = 0; /* keep compiler quiet */
int best_valid_delta = -1;
int best_valid_page_number = 0; /* keep compiler quiet */
int bestinvalidslot = 0; /* keep compiler quiet */
int best_invalid_delta = -1;
int best_invalid_page_number = 0; /* keep compiler quiet */
/* See if page already has a buffer assigned */
for (slotno = 0; slotno < shared->num_slots; slotno++)
@ -920,15 +920,15 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If we find any EMPTY slot, just select that one. Else choose a
* victim page to replace. We normally take the least recently used
* valid page, but we will never take the slot containing
* latest_page_number, even if it appears least recently used. We
* will select a slot that is already I/O busy only if there is no
* other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is
* inferior to just picking some other slot. Testing shows the slot
* we pick instead will often be clean, allowing us to begin a read
* at once.
* we pick instead will often be clean, allowing us to begin a read at
* once.
*
* Normally the page_lru_count values will all be different and so
* there will be a well-defined LRU page. But since we allow
@ -997,10 +997,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If all pages (except possibly the latest one) are I/O busy, we'll
* have to wait for an I/O to complete and then retry. In that unhappy
* case, we choose to wait for the I/O on the least recently used slot,
* on the assumption that it was likely initiated first of all the I/Os
* in progress and may therefore finish first.
* have to wait for an I/O to complete and then retry. In that
* unhappy case, we choose to wait for the I/O on the least recently
* used slot, on the assumption that it was likely initiated first of
* all the I/Os in progress and may therefore finish first.
*/
if (best_valid_delta < 0)
{
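A compact sketch of the victim-selection rules described above (hypothetical slot bookkeeping; the real code also tracks valid/invalid deltas and falls back to I/O-busy slots when it must):

#include <stdio.h>

#define NSLOTS 8

typedef struct
{
    int page_number;
    int lru_count;      /* higher = less recently used */
    int io_busy;
} Slot;

static int
select_victim(Slot *slots, int latest_page_number)
{
    int best = -1;
    int best_count = -1;
    int i;

    for (i = 0; i < NSLOTS; i++)
    {
        if (slots[i].page_number == latest_page_number)
            continue;       /* never evict the latest page */
        if (slots[i].io_busy)
            continue;       /* prefer non-busy slots (fallback omitted) */
        if (slots[i].lru_count > best_count)
        {
            best_count = slots[i].lru_count;
            best = i;
        }
    }
    return best;            /* -1 means everything was busy: caller waits */
}

int
main(void)
{
    Slot slots[NSLOTS] = {
        {40, 3, 0}, {41, 9, 1}, {42, 7, 0}, {43, 1, 0},
        {44, 5, 0}, {45, 2, 0}, {46, 8, 0}, {47, 4, 0},
    };

    printf("victim slot: %d\n", select_victim(slots, 46));
    return 0;
}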
@ -1168,20 +1168,20 @@ restart:;
/*
* SlruScanDirectory callback
* This callback reports true if there's any segment prior to the one
* containing the page passed as "data".
*/
bool
SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
{
int cutoffPage = *(int *) data;
cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
if (ctl->PagePrecedes(segpage, cutoffPage))
return true; /* found one; don't iterate any more */
return false; /* keep going */
}
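The scan-with-callback contract these functions follow can be shown in miniature (hypothetical driver; the real one reads the SLRU directory):

#include <stdio.h>
#include <stdbool.h>

/* The driver walks segments and stops as soon as a callback returns
 * true; returning false means "keep going". */
typedef bool (*ScanCb) (int segpage, void *data);

static bool
scan_segments(const int *segpages, int n, ScanCb cb, void *data)
{
    int i;

    for (i = 0; i < n; i++)
        if (cb(segpages[i], data))
            return true;    /* callback asked to stop */
    return false;
}

static bool
report_presence(int segpage, void *data)
{
    int cutoffPage = *(int *) data;

    return segpage < cutoffPage;    /* found one; don't iterate any more */
}

int
main(void)
{
    int segpages[] = {0, 32, 64, 96};
    int cutoff = 64;

    printf("segment before cutoff? %d\n",
           scan_segments(segpages, 4, report_presence, &cutoff));
    return 0;
}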
/*
@ -1191,8 +1191,8 @@ SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data
static bool
SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
{
char path[MAXPGPATH];
int cutoffPage = *(int *) data;
if (ctl->PagePrecedes(segpage, cutoffPage))
{
@ -1202,7 +1202,7 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
unlink(path);
}
return false; /* keep going */
}
/*
@ -1212,14 +1212,14 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
bool
SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
{
char path[MAXPGPATH];
snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, filename);
ereport(DEBUG2,
(errmsg("removing file \"%s\"", path)));
unlink(path);
return false; /* keep going */
}
/*
@ -360,8 +360,9 @@ static void
GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts,
TransactionId *children)
{
PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
/* We need no extra lock since the GXACT isn't valid yet */
if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS)
{
@ -410,7 +411,7 @@ LockGXact(const char *gid, Oid user)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
/* Ignore not-yet-valid GIDs */
if (!gxact->valid)
@ -523,7 +524,7 @@ TransactionIdIsPrepared(TransactionId xid)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid && pgxact->xid == xid)
{
@ -648,8 +649,8 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
while (status->array != NULL && status->currIdx < status->ngxacts)
{
GlobalTransaction gxact = &status->array[status->currIdx++];
PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
Datum values[5];
bool nulls[5];
HeapTuple tuple;
@ -719,7 +720,7 @@ TwoPhaseGetDummyProc(TransactionId xid)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (pgxact->xid == xid)
{
@ -850,8 +851,8 @@ save_state_data(const void *data, uint32 len)
void
StartPrepare(GlobalTransaction gxact)
{
PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
TransactionId xid = pgxact->xid;
TwoPhaseFileHeader hdr;
TransactionId *children;
@ -1063,9 +1064,9 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
* Mark the prepared transaction as valid. As soon as xact.c marks MyPgXact
* as not running our XID (which it will do immediately after this
* function returns), others can commit/rollback the xact.
* Mark the prepared transaction as valid. As soon as xact.c marks
* MyPgXact as not running our XID (which it will do immediately after
* this function returns), others can commit/rollback the xact.
*
* NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyPgXact,
@ -1551,7 +1552,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid &&
XLByteLE(gxact->prepare_lsn, redo_horizon))
@ -1707,7 +1708,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
* XID, and they may force us to advance nextXid.
*
* We don't expect anyone else to modify nextXid, hence we don't
* need to hold a lock while examining it. We still acquire the
* lock to modify it, though.
*/
subxids = (TransactionId *)
@ -174,8 +174,8 @@ GetNewTransactionId(bool isSubXact)
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
* XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
* are relying on fetch/store of an xid to be atomic, else other backends
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
@ -1019,6 +1019,7 @@ RecordTransactionCommit(void)
XLogRecData rdata[4];
int lastrdata = 0;
xl_xact_commit xlrec;
/*
* Set flags required for recovery processing of commits.
*/
@ -1073,7 +1074,8 @@ RecordTransactionCommit(void)
{
XLogRecData rdata[2];
int lastrdata = 0;
xl_xact_commit_compact xlrec;
xlrec.xact_time = xactStopTimestamp;
xlrec.nsubxacts = nchildren;
rdata[0].data = (char *) (&xlrec);
@ -2102,7 +2104,7 @@ PrepareTransaction(void)
if (XactHasExportedSnapshots())
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot PREPARE a transaction that has exported snapshots")));
errmsg("cannot PREPARE a transaction that has exported snapshots")));
/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
@ -2602,10 +2604,10 @@ CommitTransactionCommand(void)
break;
/*
* We were issued a RELEASE command, so we end the
* current subtransaction and return to the parent transaction.
* The parent might be ended too, so repeat till we find an
* INPROGRESS transaction or subtransaction.
* We were issued a RELEASE command, so we end the current
* subtransaction and return to the parent transaction. The parent
* might be ended too, so repeat till we find an INPROGRESS
* transaction or subtransaction.
*/
case TBLOCK_SUBRELEASE:
do
@ -2623,9 +2625,9 @@ CommitTransactionCommand(void)
* hierarchy and perform final commit. We do this by rolling up
* any subtransactions into their parent, which leads to O(N^2)
* operations with respect to resource owners - this isn't that
* bad until we approach thousands of savepoints but is necessary
* for correctness should after triggers create new resource
* owners.
* bad until we approach thousands of savepoints but is
* necessary for correctness should after triggers create new
* resource owners.
*/
case TBLOCK_SUBCOMMIT:
do
@ -4551,11 +4553,11 @@ xactGetCommittedChildren(TransactionId **ptr)
*/
static void
xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
TransactionId *sub_xids, int nsubxacts,
SharedInvalidationMessage *inval_msgs, int nmsgs,
RelFileNode *xnodes, int nrels,
Oid dbId, Oid tsId,
uint32 xinfo)
{
TransactionId max_xid;
int i;
@ -4659,12 +4661,13 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
XLogFlush(lsn);
}
/*
* Utility function to call xact_redo_commit_internal after breaking down xlrec
*/
static void
xact_redo_commit(xl_xact_commit *xlrec,
TransactionId xid, XLogRecPtr lsn)
{
TransactionId *subxacts;
SharedInvalidationMessage *inval_msgs;
@ -4675,11 +4678,11 @@ xact_redo_commit(xl_xact_commit *xlrec,
inval_msgs = (SharedInvalidationMessage *) &(subxacts[xlrec->nsubxacts]);
xact_redo_commit_internal(xid, lsn, subxacts, xlrec->nsubxacts,
inval_msgs, xlrec->nmsgs,
xlrec->xnodes, xlrec->nrels,
xlrec->dbId,
xlrec->tsId,
xlrec->xinfo);
}
/*
@ -4687,14 +4690,14 @@ xact_redo_commit(xl_xact_commit *xlrec,
*/
static void
xact_redo_commit_compact(xl_xact_commit_compact *xlrec,
TransactionId xid, XLogRecPtr lsn)
{
xact_redo_commit_internal(xid, lsn, xlrec->subxacts, xlrec->nsubxacts,
NULL, 0, /* inval msgs */
NULL, 0, /* relfilenodes */
InvalidOid, /* dbId */
InvalidOid, /* tsId */
0); /* xinfo */
}
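To see why the compact record format pays off, compare hypothetical header layouts along the lines of the two record types (sizes are for comparison only; the real structs differ in detail):

#include <stdio.h>
#include <stdint.h>

/* When a commit touched no relations and sent no invalidations, most
 * fields can be dropped; only the timestamp and subxact list remain. */
typedef struct
{
    int64_t xact_time;
    int     nsubxacts;
    /* flexible array of subxact XIDs follows */
} CompactCommit;

typedef struct
{
    int64_t  xact_time;
    uint32_t xinfo;
    int      nrels;
    int      nsubxacts;
    int      nmsgs;
    uint32_t dbId;
    uint32_t tsId;
    /* relfilenodes, subxacts, inval messages follow */
} FullCommit;

int
main(void)
{
    printf("compact header: %zu bytes, full header: %zu bytes\n",
           sizeof(CompactCommit), sizeof(FullCommit));
    return 0;
}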
/*
@ -344,10 +344,10 @@ typedef struct XLogCtlInsert
/*
* fullPageWrites is the master copy used by all backends to determine
* whether to write full-page to WAL, instead of using process-local
* one. This is required because, when full_page_writes is changed
* by SIGHUP, we must WAL-log it before it actually affects
* WAL-logging by backends. Checkpointer sets at startup or after SIGHUP.
* whether to write full-page to WAL, instead of using process-local one.
* This is required because, when full_page_writes is changed by SIGHUP,
* we must WAL-log it before it actually affects WAL-logging by backends.
* Checkpointer sets at startup or after SIGHUP.
*/
bool fullPageWrites;
@ -455,8 +455,11 @@ typedef struct XLogCtlData
XLogRecPtr recoveryLastRecPtr;
/* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
TimestampTz recoveryLastXTime;
/* timestamp of when we started replaying the current chunk of WAL data,
* only relevant for replication or archive recovery */
/*
* timestamp of when we started replaying the current chunk of WAL data,
* only relevant for replication or archive recovery
*/
TimestampTz currentChunkStartTime;
/* end of the last record restored from the archive */
XLogRecPtr restoreLastRecPtr;
@ -580,7 +583,7 @@ static bool updateMinRecoveryPoint = true;
* to replay all the WAL, so reachedConsistency is never set. During archive
* recovery, the database is consistent once minRecoveryPoint is reached.
*/
bool reachedConsistency = false;
static bool InRedo = false;
@ -750,8 +753,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* insert lock, but it seems better to avoid doing CRC calculations while
* holding the lock.
*
* We add entries for backup blocks to the chain, so that they don't
* need any special treatment in the critical section where the chunks are
* We add entries for backup blocks to the chain, so that they don't need
* any special treatment in the critical section where the chunks are
* copied into the WAL buffers. Those entries have to be unlinked from the
* chain if we have to loop back here.
*/
@ -896,10 +899,10 @@ begin:;
/*
* Calculate CRC of the data, including all the backup blocks
*
* Note that the record header isn't added into the CRC initially since
* we don't know the prev-link yet. Thus, the CRC will represent the CRC
* of the whole record in the order: rdata, then backup blocks, then
* record header.
* Note that the record header isn't added into the CRC initially since we
* don't know the prev-link yet. Thus, the CRC will represent the CRC of
* the whole record in the order: rdata, then backup blocks, then record
* header.
*/
INIT_CRC32(rdata_crc);
for (rdt = rdata; rdt != NULL; rdt = rdt->next)
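The ordering rule in that comment, sketched with a stand-in checksum instead of the real CRC-32 macros: the data chain and backup blocks are folded in first, and the header last, because the prev-link in the header is unknown until insert time.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct Chunk
{
    const void   *data;
    size_t        len;
    struct Chunk *next;
} Chunk;

static void
comp_sum(uint32_t *sum, const void *data, size_t len)
{
    const uint8_t *p = data;

    while (len-- > 0)
        *sum = *sum * 31 + *p++;    /* toy rolling checksum, not CRC-32 */
}

int
main(void)
{
    char  body[] = "tuple data";
    char  bkp[] = "backup block";
    char  hdr[] = "record header";
    Chunk c2 = {bkp, sizeof(bkp), NULL};
    Chunk c1 = {body, sizeof(body), &c2};
    Chunk *rdt;
    uint32_t crc = 0;

    for (rdt = &c1; rdt != NULL; rdt = rdt->next)   /* rdata, then backup blocks */
        comp_sum(&crc, rdt->data, rdt->len);
    comp_sum(&crc, hdr, sizeof(hdr));               /* header goes in last */
    printf("record checksum: %u\n", crc);
    return 0;
}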
@ -948,10 +951,10 @@ begin:;
}
/*
* Also check to see if fullPageWrites or forcePageWrites was just turned on;
* if we weren't already doing full-page writes then go back and recompute.
* (If it was just turned off, we could recompute the record without full pages,
* but we choose not to bother.)
* Also check to see if fullPageWrites or forcePageWrites was just turned
* on; if we weren't already doing full-page writes then go back and
* recompute. (If it was just turned off, we could recompute the record
* without full pages, but we choose not to bother.)
*/
if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites)
{
@ -1575,15 +1578,15 @@ AdvanceXLInsertBuffer(bool new_segment)
* WAL records beginning in this page have removable backup blocks. This
* allows the WAL archiver to know whether it is safe to compress archived
* WAL data by transforming full-block records into the non-full-block
* format. It is sufficient to record this at the page level because we
* force a page switch (in fact a segment switch) when starting a backup,
* so the flag will be off before any records can be written during the
* backup. At the end of a backup, the last page will be marked as all
* unsafe when perhaps only part is unsafe, but at worst the archiver
* would miss the opportunity to compress a few records.
*/
if (!Insert->forcePageWrites)
NewPage->xlp_info |= XLP_BKP_REMOVABLE;
NewPage ->xlp_info |= XLP_BKP_REMOVABLE;
/*
* If first page of an XLOG segment file, make it a long header.
@ -1827,11 +1830,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
Write->lastSegSwitchTime = (pg_time_t) time(NULL);
/*
* Request a checkpoint if we've consumed too
* much xlog since the last one. For speed, we first check
* using the local copy of RedoRecPtr, which might be out of
* date; if it looks like a checkpoint is needed, forcibly
* update RedoRecPtr and recheck.
* Request a checkpoint if we've consumed too much xlog since
* the last one. For speed, we first check using the local
* copy of RedoRecPtr, which might be out of date; if it looks
* like a checkpoint is needed, forcibly update RedoRecPtr and
* recheck.
*/
if (IsUnderPostmaster &&
XLogCheckpointNeeded(openLogId, openLogSeg))
@ -1931,7 +1934,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
/*
* If the WALWriter is sleeping, we should kick it to make it come out of
* low-power mode. Otherwise, determine whether there's a full page of
* WAL available to write.
*/
if (!sleeping)
@ -1945,9 +1948,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
}
/*
* Nudge the WALWriter: it has a full page of WAL to write, or we want
* it to come out of low-power mode so that this async commit will reach
* disk within the expected amount of time.
* Nudge the WALWriter: it has a full page of WAL to write, or we want it
* to come out of low-power mode so that this async commit will reach disk
* within the expected amount of time.
*/
if (ProcGlobal->walwriterLatch)
SetLatch(ProcGlobal->walwriterLatch);
@ -2076,8 +2079,8 @@ XLogFlush(XLogRecPtr record)
WriteRqstPtr = record;
/*
* Now wait until we get the write lock, or someone else does the
* flush for us.
* Now wait until we get the write lock, or someone else does the flush
* for us.
*/
for (;;)
{
@ -2182,7 +2185,7 @@ XLogFlush(XLogRecPtr record)
* block, and flush through the latest one of those. Thus, if async commits
* are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
* one or two. (When flushing complete blocks, we allow XLogWrite to write
* "flexibly", meaning it can stop at the end of the buffer ring; this makes a
* difference only with very high load or long wal_writer_delay, but imposes
* one extra cycle for the worst case for async commits.)
@ -2273,7 +2276,8 @@ XLogBackgroundFlush(void)
/*
* If we wrote something then we have something to send to standbys also,
* otherwise the replication delay becomes around 7s with just async commit.
* otherwise the replication delay becomes around 7s with just async
* commit.
*/
if (wrote_something)
WalSndWakeup();
@ -2776,17 +2780,17 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
}
/*
* If the segment was fetched from archival storage, replace
* the existing xlog segment (if any) with the archival version.
* If the segment was fetched from archival storage, replace the existing
* xlog segment (if any) with the archival version.
*/
if (source == XLOG_FROM_ARCHIVE)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr endptr;
char xlogfpath[MAXPGPATH];
bool reload = false;
struct stat statbuf;
XLogFilePath(xlogfpath, tli, log, seg);
if (stat(xlogfpath, &statbuf) == 0)
@ -2801,9 +2805,9 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
if (rename(path, xlogfpath) < 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not rename file \"%s\" to \"%s\": %m",
path, xlogfpath)));
/*
* If the existing segment was replaced, since walsenders might have
@ -3812,7 +3816,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt)
RecPtr = &tmpRecPtr;
/*
* RecPtr is pointing to end+1 of the previous WAL record. We must
* advance it if necessary to where the next record starts. First,
* align to next page if no more records can fit on the current page.
*/
@ -5389,10 +5393,10 @@ readRecoveryCommandFile(void)
}
if (rtli)
ereport(DEBUG2,
(errmsg_internal("recovery_target_timeline = %u", rtli)));
(errmsg_internal("recovery_target_timeline = %u", rtli)));
else
ereport(DEBUG2,
(errmsg_internal("recovery_target_timeline = latest")));
(errmsg_internal("recovery_target_timeline = latest")));
}
else if (strcmp(item->name, "recovery_target_xid") == 0)
{
@ -5404,7 +5408,7 @@ readRecoveryCommandFile(void)
item->value)));
ereport(DEBUG2,
(errmsg_internal("recovery_target_xid = %u",
recoveryTargetXid)));
recoveryTarget = RECOVERY_TARGET_XID;
}
else if (strcmp(item->name, "recovery_target_time") == 0)
@ -5428,7 +5432,7 @@ readRecoveryCommandFile(void)
Int32GetDatum(-1)));
ereport(DEBUG2,
(errmsg_internal("recovery_target_time = '%s'",
timestamptz_to_str(recoveryTargetTime))));
}
else if (strcmp(item->name, "recovery_target_name") == 0)
{
@ -5576,13 +5580,13 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
}
/*
* If we are establishing a new timeline, we have to copy data from
* the last WAL segment of the old timeline to create a starting WAL
* segment for the new timeline.
* If we are establishing a new timeline, we have to copy data from the
* last WAL segment of the old timeline to create a starting WAL segment
* for the new timeline.
*
* Notify the archiver that the last WAL segment of the old timeline
* is ready to copy to archival storage. Otherwise, it is not archived
* for a while.
* Notify the archiver that the last WAL segment of the old timeline is
* ready to copy to archival storage. Otherwise, it is not archived for a
* while.
*/
if (endTLI != ThisTimeLineID)
{
@ -5604,8 +5608,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
XLogArchiveCleanup(xlogpath);
/*
* Since there might be a partial WAL segment named RECOVERYXLOG,
* get rid of it.
* Since there might be a partial WAL segment named RECOVERYXLOG, get rid
* of it.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
unlink(recoveryPath); /* ignore any error */
@ -6323,11 +6327,11 @@ StartupXLOG(void)
/*
* Set backupStartPoint if we're starting recovery from a base backup.
*
* Set backupEndPoint and use minRecoveryPoint as the backup end location
* if we're starting recovery from a base backup which was taken from
* the standby. In this case, the database system status in pg_control must
* indicate DB_IN_ARCHIVE_RECOVERY. If not, which means that backup
* is corrupted, so we cancel recovery.
* Set backupEndPoint and use minRecoveryPoint as the backup end
* location if we're starting recovery from a base backup which was
* taken from the standby. In this case, the database system status in
* pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, which
* means that backup is corrupted, so we cancel recovery.
*/
if (haveBackupLabel)
{
@ -6340,7 +6344,7 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("backup_label contains inconsistent data with control file"),
errhint("This means that the backup is corrupted and you will "
"have to use another backup for recovery.")));
"have to use another backup for recovery.")));
ControlFile->backupEndPoint = ControlFile->minRecoveryPoint;
}
}
@ -6383,15 +6387,15 @@ StartupXLOG(void)
/*
* We're in recovery, so unlogged relations may be trashed and must be
* reset. This should be done BEFORE allowing Hot Standby connections,
* so that read-only backends don't try to read whatever garbage is
* left over from before.
* reset. This should be done BEFORE allowing Hot Standby
* connections, so that read-only backends don't try to read whatever
* garbage is left over from before.
*/
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
/*
* Likewise, delete any saved transaction snapshot files that got
* left behind by crashed backends.
* Likewise, delete any saved transaction snapshot files that got left
* behind by crashed backends.
*/
DeleteAllExportedSnapshotFiles();
@ -6489,10 +6493,11 @@ StartupXLOG(void)
/*
* Let postmaster know we've started redo now, so that it can launch
* checkpointer to perform restartpoints. We don't bother during crash
* recovery as restartpoints can only be performed during archive
* recovery. And we'd like to keep crash recovery simple, to avoid
* introducing bugs that could affect you when recovering after crash.
* checkpointer to perform restartpoints. We don't bother during
* crash recovery as restartpoints can only be performed during
* archive recovery. And we'd like to keep crash recovery simple, to
* avoid introducing bugs that could affect you when recovering after
* crash.
*
* After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are
@ -6649,8 +6654,8 @@ StartupXLOG(void)
{
/*
* We have reached the end of base backup, the point where
* the minimum recovery point in pg_control indicates.
* The data on disk is now consistent. Reset backupStartPoint
* the minimum recovery point in pg_control indicates. The
* data on disk is now consistent. Reset backupStartPoint
* and backupEndPoint.
*/
elog(DEBUG1, "end of backup reached");
@ -6863,9 +6868,9 @@ StartupXLOG(void)
oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
/*
* Update full_page_writes in shared memory and write an
* XLOG_FPW_CHANGE record before resource manager writes cleanup
* WAL records or checkpoint record is written.
* Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
* record before resource manager writes cleanup WAL records or checkpoint
* record is written.
*/
Insert->fullPageWrites = lastFullPageWrites;
LocalSetXLogInsertAllowed();
@ -6954,8 +6959,8 @@ StartupXLOG(void)
LWLockRelease(ProcArrayLock);
/*
* Start up the commit log and subtrans, if not already done for
* hot standby.
* Start up the commit log and subtrans, if not already done for hot
* standby.
*/
if (standbyState == STANDBY_DISABLED)
{
@ -7705,9 +7710,9 @@ CreateCheckPoint(int flags)
checkPoint.time = (pg_time_t) time(NULL);
/*
* For Hot Standby, derive the oldestActiveXid before we fix the redo pointer.
* This allows us to begin accumulating changes to assemble our starting
* snapshot of locks and transactions.
* For Hot Standby, derive the oldestActiveXid before we fix the redo
* pointer. This allows us to begin accumulating changes to assemble our
* starting snapshot of locks and transactions.
*/
if (!shutdown && XLogStandbyInfoActive())
checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
@ -8062,7 +8067,7 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
volatile XLogCtlData *xlogctl = XLogCtl;
/*
* Is it safe to restartpoint? We must ask each of the resource managers
* whether they have any partial state information that might prevent a
* correct restart from this point. If so, we skip this opportunity, but
* return at the next checkpoint record for another try.
@ -8082,10 +8087,11 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
* Also refrain from creating a restartpoint if we have seen any references
* to non-existent pages. Restarting recovery from the restartpoint would
* not see the references, so we would lose the cross-check that the pages
* belonged to a relation that was dropped later.
* Also refrain from creating a restartpoint if we have seen any
* references to non-existent pages. Restarting recovery from the
* restartpoint would not see the references, so we would lose the
* cross-check that the pages belonged to a relation that was dropped
* later.
*/
if (XLogHaveInvalidPages())
{
@ -8098,8 +8104,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
* Copy the checkpoint record to shared memory, so that checkpointer
* can work out the next time it wants to perform a restartpoint.
* Copy the checkpoint record to shared memory, so that checkpointer can
* work out the next time it wants to perform a restartpoint.
*/
SpinLockAcquire(&xlogctl->info_lck);
XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
@ -8493,8 +8499,8 @@ UpdateFullPageWrites(void)
* Do nothing if full_page_writes has not been changed.
*
* It's safe to check the shared full_page_writes without the lock,
* because we assume that there is no concurrently running process
* which can update it.
* because we assume that there is no concurrently running process which
* can update it.
*/
if (fullPageWrites == Insert->fullPageWrites)
return;
@ -8505,8 +8511,8 @@ UpdateFullPageWrites(void)
* It's always safe to take full page images, even when not strictly
* required, but not the other way round. So if we're setting full_page_writes
* to true, first set it true and then write the WAL record. If we're
* setting it to false, first write the WAL record and then set the
* global flag.
* setting it to false, first write the WAL record and then set the global
* flag.
*/
if (fullPageWrites)
{
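The ordering described here reduces to a small pattern, sketched below with stubs for the shared flag and the WAL insert (an illustration, not the function's actual body):

#include <stdio.h>
#include <stdbool.h>

static bool shared_fullPageWrites = false;

static void
wal_log_fpw(bool value)
{
    printf("WAL: XLOG_FPW_CHANGE -> %s\n", value ? "on" : "off");
}

static void
update_full_page_writes(bool fullPageWrites)
{
    if (fullPageWrites)
        shared_fullPageWrites = true;   /* extra full-page images are always safe */

    wal_log_fpw(fullPageWrites);

    if (!fullPageWrites)
        shared_fullPageWrites = false;  /* only stop once the change is logged */
}

int
main(void)
{
    update_full_page_writes(true);
    update_full_page_writes(false);
    return 0;
}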
@ -8516,12 +8522,12 @@ UpdateFullPageWrites(void)
}
/*
* Write an XLOG_FPW_CHANGE record. This allows us to keep
* track of full_page_writes during archive recovery, if required.
* Write an XLOG_FPW_CHANGE record. This allows us to keep track of
* full_page_writes during archive recovery, if required.
*/
if (XLogStandbyInfoActive() && !RecoveryInProgress())
{
XLogRecData rdata;
rdata.data = (char *) (&fullPageWrites);
rdata.len = sizeof(bool);
@ -8561,7 +8567,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* We used to try to take the maximum of ShmemVariableCache->nextOid
* and the recorded nextOid, but that fails if the OID counter wraps
* around. Since no OID allocation should be happening during replay
* anyway, better to just believe the record exactly. We still take
* OidGenLock while setting the variable, just in case.
*/
@ -8597,7 +8603,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
!XLogRecPtrIsInvalid(ControlFile->backupStartPoint) &&
XLogRecPtrIsInvalid(ControlFile->backupEndPoint))
ereport(PANIC,
(errmsg("online backup was canceled, recovery cannot continue")));
(errmsg("online backup was canceled, recovery cannot continue")));
/*
* If we see a shutdown checkpoint, we know that nothing was running
@ -8797,9 +8803,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
/*
* Update the LSN of the last replayed XLOG_FPW_CHANGE record
* so that do_pg_start_backup() and do_pg_stop_backup() can check
* whether full_page_writes has been disabled during online backup.
* Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
* do_pg_start_backup() and do_pg_stop_backup() can check whether
* full_page_writes has been disabled during online backup.
*/
if (!fpw)
{
@ -8825,7 +8831,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
CheckPoint *checkpoint = (CheckPoint *) rec;
appendStringInfo(buf, "checkpoint: redo %X/%X; "
"tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest running xid %u; %s",
checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
checkpoint->ThisTimeLineID,
@ -9115,8 +9121,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
errhint("WAL control functions cannot be executed during recovery.")));
/*
* During recovery, we don't need to check WAL level. Because, if WAL level
* is not sufficient, it's impossible to get here during recovery.
* During recovery, we don't need to check WAL level. Because, if WAL
* level is not sufficient, it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@ -9179,7 +9185,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
* old timeline IDs. That would otherwise happen if you called
* pg_start_backup() right after restoring from a PITR archive: the
* first WAL segment containing the startup checkpoint has pages in
* the beginning with the old timeline ID. That can cause trouble at
* recovery: we won't have a history file covering the old timeline if
* pg_xlog directory was not included in the base backup and the WAL
* archive was cleared too before starting the backup.
@ -9202,17 +9208,18 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
bool checkpointfpw;
/*
* Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
*
* During recovery, establish a restartpoint if possible. We use the last
* restartpoint as the backup starting checkpoint. This means that two
* successive backup runs can have same checkpoint positions.
* During recovery, establish a restartpoint if possible. We use
* the last restartpoint as the backup starting checkpoint. This
* means that two successive backup runs can have same checkpoint
* positions.
*
* Since the fact that we are executing do_pg_start_backup() during
* recovery means that checkpointer is running, we can use
* Since the fact that we are executing do_pg_start_backup()
* during recovery means that checkpointer is running, we can use
* RequestCheckpoint() to establish a restartpoint.
*
* We use CHECKPOINT_IMMEDIATE only if requested by user (via
@ -9237,12 +9244,12 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr recptr;
XLogRecPtr recptr;
/*
* Check to see if all WAL replayed during online backup (i.e.,
* since last restartpoint used as backup starting checkpoint)
* contain full-page writes.
* Check to see if all WAL replayed during online backup
* (i.e., since last restartpoint used as backup starting
* checkpoint) contain full-page writes.
*/
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastFpwDisableRecPtr;
@ -9250,20 +9257,20 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
if (!checkpointfpw || XLByteLE(startpoint, recptr))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
"since last restartpoint"),
errhint("This means that the backup being taken on standby "
"is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the master, "
"and then try an online backup again.")));
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
"since last restartpoint"),
errhint("This means that the backup being taken on standby "
"is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the master, "
"and then try an online backup again.")));
/*
* During recovery, since we don't use the end-of-backup WAL
* record and don't write the backup history file, the starting WAL
* location doesn't need to be unique. This means that two base
* backups started at the same time might use the same checkpoint
* as starting locations.
* record and don't write the backup history file, the
* starting WAL location doesn't need to be unique. This means
* that two base backups started at the same time might use
* the same checkpoint as starting locations.
*/
gotUniqueStartpoint = true;
}
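
Editor's note: the hunks above concern one mechanism: xlog_redo() records the LSN of the newest XLOG_FPW_CHANGE record that turned full_page_writes off, and do_pg_start_backup() compares that LSN against the backup's starting checkpoint before allowing a standby backup. A self-contained toy of that handshake (hypothetical names, plain C, not PostgreSQL's internal API; the real code guards the shared LSN with a spinlock):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the FPW bookkeeping; LSNs collapsed to plain uint64_t. */
static uint64_t last_fpw_disable_lsn = 0;   /* cf. lastFpwDisableRecPtr */

/* Replay side: remember the newest LSN at which FPW was switched off.
 * The real code updates this under xlogctl->info_lck. */
static void replay_fpw_change(uint64_t lsn, int fpw_on)
{
    if (!fpw_on && lsn > last_fpw_disable_lsn)
        last_fpw_disable_lsn = lsn;
}

/* Backup side: a standby backup is safe only if the starting checkpoint
 * had FPW on and no FPW-off WAL was replayed at or after its LSN. */
static int backup_start_is_safe(uint64_t startpoint, int checkpoint_fpw)
{
    return checkpoint_fpw && startpoint > last_fpw_disable_lsn;
}

int main(void)
{
    replay_fpw_change(100, 1);
    replay_fpw_change(200, 0);  /* FPW disabled at LSN 200 */
    printf("start@150: %s\n", backup_start_is_safe(150, 1) ? "ok" : "refuse");
    printf("start@250: %s\n", backup_start_is_safe(250, 1) ? "ok" : "refuse");
    return 0;
}
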
@ -9443,8 +9450,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
errhint("WAL control functions cannot be executed during recovery.")));
/*
* During recovery, we don't need to check WAL level. Because, if WAL level
* is not sufficient, it's impossible to get here during recovery.
* During recovery, we don't need to check WAL level. Because, if WAL
* level is not sufficient, it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@ -9537,9 +9544,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
/*
* Parse the BACKUP FROM line. If we are taking an online backup from
* the standby, we confirm that the standby has not been promoted
* during the backup.
* Parse the BACKUP FROM line. If we are taking an online backup from the
* standby, we confirm that the standby has not been promoted during the
* backup.
*/
ptr = strstr(remaining, "BACKUP FROM:");
if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
@ -9555,30 +9562,30 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
"Try taking another online backup.")));
/*
* During recovery, we don't write an end-of-backup record. We assume
* that pg_control was backed up last and its minimum recovery
* point can be available as the backup end location. Since we don't
* have an end-of-backup record, we use the pg_control value to check
* whether we've reached the end of backup when starting recovery from
* this backup. We have no way of checking if pg_control wasn't backed
* up last however.
* During recovery, we don't write an end-of-backup record. We assume that
* pg_control was backed up last and its minimum recovery point can be
* available as the backup end location. Since we don't have an
* end-of-backup record, we use the pg_control value to check whether
* we've reached the end of backup when starting recovery from this
* backup. We have no way of checking if pg_control wasn't backed up last
* however.
*
* We don't force a switch to new WAL file and wait for all the required
* files to be archived. This is okay if we use the backup to start
* the standby. But, if it's for an archive recovery, to ensure all the
* required files are available, a user should wait for them to be archived,
* or include them into the backup.
* files to be archived. This is okay if we use the backup to start the
* standby. But, if it's for an archive recovery, to ensure all the
* required files are available, a user should wait for them to be
* archived, or include them into the backup.
*
* We return the current minimum recovery point as the backup end
* location. Note that it would be bigger than the exact backup end
* location if the minimum recovery point is updated since the backup
* of pg_control. This is harmless for current uses.
* location if the minimum recovery point is updated since the backup of
* pg_control. This is harmless for current uses.
*
* XXX currently a backup history file is for informational and debug
* purposes only. It's not essential for an online backup. Furthermore,
* even if it's created, it will not be archived during recovery because
* an archiver is not invoked. So it doesn't seem worthwhile to write
* a backup history file during recovery.
* an archiver is not invoked. So it doesn't seem worthwhile to write a
* backup history file during recovery.
*/
if (backup_started_in_recovery)
{
@ -9597,12 +9604,12 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
if (XLByteLE(startpoint, recptr))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
"during online backup"),
errhint("This means that the backup being taken on standby "
"is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the master, "
"and then try an online backup again.")));
errmsg("WAL generated with full_page_writes=off was replayed "
"during online backup"),
errhint("This means that the backup being taken on standby "
"is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the master, "
"and then try an online backup again.")));
LWLockAcquire(ControlFileLock, LW_SHARED);
@ -9905,10 +9912,11 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
/*
* BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't
* restore from an older backup anyway, but since the information on it
* is not strictly required, don't error out if it's missing for some reason.
* BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore
* from an older backup anyway, but since the information on it is not
* strictly required, don't error out if it's missing for some reason.
*/
if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
{
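
Editor's note: this fscanf(), like the BACKUP FROM parsing in the do_pg_stop_backup hunk earlier, treats the label line as optional and simply skips it on a failed match. A minimal standalone sketch of that tolerant parsing (the label text here is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *label =
        "START WAL LOCATION: 0/2000028\n"
        "BACKUP METHOD: streamed\n"
        "BACKUP FROM: standby\n";
    char method[20] = "unknown";
    char from[20] = "unknown";
    const char *p;

    /* Optional lines: a miss leaves the default in place, no error. */
    if ((p = strstr(label, "BACKUP METHOD:")) != NULL)
        sscanf(p, "BACKUP METHOD: %19s", method);
    if ((p = strstr(label, "BACKUP FROM:")) != NULL)
        sscanf(p, "BACKUP FROM: %19s", from);

    printf("method=%s from=%s\n", method, from);
    return 0;
}
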
@ -10050,8 +10058,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{
/*
* Request a restartpoint if we've replayed too much
* xlog since the last one.
* Request a restartpoint if we've replayed too much xlog since the
* last one.
*/
if (StandbyMode && bgwriterLaunched)
{

View File

@ -80,10 +80,10 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/*
* Once recovery has reached a consistent state, the invalid-page table
* should be empty and remain so. If a reference to an invalid page is
* found after consistency is reached, PANIC immediately. This might
* seem aggressive, but it's better than letting the invalid reference
* linger in the hash table until the end of recovery and PANIC there,
* which might come only much later if this is a standby server.
* found after consistency is reached, PANIC immediately. This might seem
* aggressive, but it's better than letting the invalid reference linger
* in the hash table until the end of recovery and PANIC there, which
* might come only much later if this is a standby server.
*/
if (reachedConsistency)
{
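
Editor's note: a rough standalone model of the policy described above (simplified: the real invalid-page table is a hash keyed by relfilenode/fork/block, and recovery separately verifies the table is empty once consistency is reached):

#include <stdio.h>
#include <stdlib.h>

#define MAX_INVALID 16

static long invalid_pages[MAX_INVALID];
static int  n_invalid = 0;
static int  reached_consistency = 0;

static void log_invalid_page(long blkno)
{
    if (reached_consistency)
    {
        /* PANIC immediately rather than deferring to end of recovery */
        fprintf(stderr, "PANIC: invalid page %ld after consistency\n", blkno);
        abort();
    }
    if (n_invalid < MAX_INVALID)
        invalid_pages[n_invalid++] = blkno; /* might be resolved by a later drop */
}

int main(void)
{
    log_invalid_page(42);       /* tolerated: still replaying toward consistency */
    reached_consistency = 1;
    printf("%d unresolved page(s) at consistency\n", n_invalid);
    log_invalid_page(43);       /* aborts */
    return 0;
}
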

View File

@ -186,10 +186,10 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
foreach(j, grantees)
{
AclItem aclitem;
AclItem aclitem;
Acl *newer_acl;
aclitem. ai_grantee = lfirst_oid(j);
aclitem.ai_grantee = lfirst_oid(j);
/*
* Grant options can only be granted to individual roles, not PUBLIC.
@ -202,7 +202,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to roles")));
aclitem. ai_grantor = grantorId;
aclitem.ai_grantor = grantorId;
/*
* The asymmetry in the conditions here comes from the spec. In
@ -3073,7 +3073,7 @@ ExecGrant_Type(InternalGrant *istmt)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("cannot set privileges of array types"),
errhint("Set the privileges of the element type instead.")));
errhint("Set the privileges of the element type instead.")));
/* Used GRANT DOMAIN on a non-domain? */
if (istmt->objtype == ACL_OBJECT_DOMAIN &&
@ -4184,7 +4184,7 @@ pg_type_aclmask(Oid type_oid, Oid roleid, AclMode mask, AclMaskHow how)
/* "True" array types don't manage permissions of their own */
if (typeForm->typelem != 0 && typeForm->typlen == -1)
{
Oid elttype_oid = typeForm->typelem;
Oid elttype_oid = typeForm->typelem;
ReleaseSysCache(tuple);

View File

@ -173,7 +173,7 @@ static void reportDependentObjects(const ObjectAddresses *targetObjects,
int msglevel,
const ObjectAddress *origObject);
static void deleteOneObject(const ObjectAddress *object,
Relation depRel, int32 flags);
Relation depRel, int32 flags);
static void doDeletion(const ObjectAddress *object, int flags);
static void AcquireDeletionLock(const ObjectAddress *object, int flags);
static void ReleaseDeletionLock(const ObjectAddress *object);
@ -352,7 +352,8 @@ performMultipleDeletions(const ObjectAddresses *objects,
free_object_addresses(targetObjects);
/*
* We closed depRel earlier in deleteOneObject if doing a drop concurrently
* We closed depRel earlier in deleteOneObject if doing a drop
* concurrently
*/
if ((flags & PERFORM_DELETION_CONCURRENTLY) != PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock);
@ -424,7 +425,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
* Since this function is currently only used to clean out temporary
* schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that
* the operation is an automatic system operation rather than a user
* action. If, in the future, this function is used for other
* action. If, in the future, this function is used for other
* purposes, we might need to revisit this.
*/
deleteOneObject(thisobj, depRel, PERFORM_DELETION_INTERNAL);
@ -514,12 +515,12 @@ findDependentObjects(const ObjectAddress *object,
/*
* The target object might be internally dependent on some other object
* (its "owner"), and/or be a member of an extension (also considered its
* owner). If so, and if we aren't recursing from the owning object, we
* owner). If so, and if we aren't recursing from the owning object, we
* have to transform this deletion request into a deletion request of the
* owning object. (We'll eventually recurse back to this object, but the
* owning object has to be visited first so it will be deleted after.)
* The way to find out about this is to scan the pg_depend entries that
* show what this object depends on.
* owning object has to be visited first so it will be deleted after.) The
* way to find out about this is to scan the pg_depend entries that show
* what this object depends on.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
@ -577,7 +578,7 @@ findDependentObjects(const ObjectAddress *object,
/*
* Exception 1a: if the owning object is listed in
* pendingObjects, just release the caller's lock and
* return. We'll eventually complete the DROP when we
* return. We'll eventually complete the DROP when we
* reach that entry in the pending list.
*/
if (pendingObjects &&
@ -593,8 +594,8 @@ findDependentObjects(const ObjectAddress *object,
* Exception 1b: if the owning object is the extension
* currently being created/altered, it's okay to continue
* with the deletion. This allows dropping of an
* extension's objects within the extension's scripts,
* as well as corner cases such as dropping a transient
* extension's objects within the extension's scripts, as
* well as corner cases such as dropping a transient
* object created within such a script.
*/
if (creating_extension &&
@ -618,8 +619,8 @@ findDependentObjects(const ObjectAddress *object,
* it's okay to continue with the deletion. This holds when
* recursing from a whole object that includes the nominal
* other end as a component, too. Since there can be more
* than one "owning" object, we have to allow matches that
* are more than one level down in the stack.
* than one "owning" object, we have to allow matches that are
* more than one level down in the stack.
*/
if (stack_address_present_add_flags(&otherObject, 0, stack))
break;
@ -630,7 +631,7 @@ findDependentObjects(const ObjectAddress *object,
* owning object.
*
* First, release caller's lock on this object and get
* deletion lock on the owning object. (We must release
* deletion lock on the owning object. (We must release
* caller's lock to avoid deadlock against a concurrent
* deletion of the owning object.)
*/
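
Editor's note: a compressed sketch of the lock dance this comment describes (names hypothetical; the real code re-enters findDependentObjects() on the owning object rather than dropping anything directly):

#include <stdio.h>

static void release_lock(const char *obj) { printf("release %s\n", obj); }
static void acquire_lock(const char *obj) { printf("acquire %s\n", obj); }

/* To delete an internally dependent object, redirect the request at its
 * owner.  Dropping our own lock first avoids an object->owner versus
 * owner->object deadlock with a concurrent drop of the owner. */
static void transform_drop(const char *object, const char *owner)
{
    release_lock(object);
    acquire_lock(owner);
    acquire_lock(object);   /* revisited while walking the owner's deps */
    printf("drop %s, then %s\n", owner, object);
}

int main(void)
{
    transform_drop("toast table", "owning table");
    return 0;
}
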
@ -999,7 +1000,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
/* DROP hook of the objects being removed */
if (object_access_hook)
{
ObjectAccessDrop drop_arg;
ObjectAccessDrop drop_arg;
drop_arg.dropflags = flags;
InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId,
object->objectSubId, &drop_arg);
@ -1049,8 +1051,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
object->objectSubId);
/*
* Close depRel if we are doing a drop concurrently because it
* commits the transaction, so we don't want dangling references.
* Close depRel if we are doing a drop concurrently because it commits the
* transaction, so we don't want dangling references.
*/
if ((flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock);
@ -1093,8 +1095,8 @@ doDeletion(const ObjectAddress *object, int flags)
if (relKind == RELKIND_INDEX)
{
bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY)
== PERFORM_DELETION_CONCURRENTLY);
bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY)
== PERFORM_DELETION_CONCURRENTLY);
Assert(object->objectSubId == 0);
index_drop(object->objectId, concurrent);

View File

@ -1957,7 +1957,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr,
ccsrc, /* Source form of check constraint */
is_local, /* conislocal */
inhcount, /* coninhcount */
is_no_inherit); /* connoinherit */
is_no_inherit); /* connoinherit */
pfree(ccbin);
pfree(ccsrc);
@ -1998,7 +1998,7 @@ StoreConstraints(Relation rel, List *cooked_constraints)
break;
case CONSTR_CHECK:
StoreRelCheck(rel, con->name, con->expr, !con->skip_validation,
con->is_local, con->inhcount, con->is_no_inherit);
con->is_local, con->inhcount, con->is_no_inherit);
numchecks++;
break;
default:
@ -2345,8 +2345,8 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
}
/* OK to update the tuple */
ereport(NOTICE,
(errmsg("merging constraint \"%s\" with inherited definition",
ccname)));
(errmsg("merging constraint \"%s\" with inherited definition",
ccname)));
simple_heap_update(conDesc, &tup->t_self, tup);
CatalogUpdateIndexes(conDesc, tup);
break;

View File

@ -1155,7 +1155,7 @@ index_constraint_create(Relation heapRelation,
NULL,
NULL,
true, /* islocal */
0, /* inhcount */
0, /* inhcount */
false); /* noinherit */
/*
@ -1324,8 +1324,8 @@ index_drop(Oid indexId, bool concurrent)
CheckTableNotInUse(userIndexRelation, "DROP INDEX");
/*
* Drop Index concurrently is similar in many ways to creating an
* index concurrently, so some actions are similar to DefineIndex()
* Drop Index concurrently is similar in many ways to creating an index
* concurrently, so some actions are similar to DefineIndex()
*/
if (concurrent)
{
@ -1339,7 +1339,7 @@ index_drop(Oid indexId, bool concurrent)
indexRelation = heap_open(IndexRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy1(INDEXRELID,
ObjectIdGetDatum(indexId));
ObjectIdGetDatum(indexId));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u", indexId);
indexForm = (Form_pg_index) GETSTRUCT(tuple);
@ -1373,15 +1373,15 @@ index_drop(Oid indexId, bool concurrent)
* will be marked not indisvalid, so that no one else tries to either
* insert into it or use it for queries.
*
* We must commit our current transaction so that the index update becomes
* visible; then start another. Note that all the data structures we just
* built are lost in the commit. The only data we keep past here are the
* relation IDs.
* We must commit our current transaction so that the index update
* becomes visible; then start another. Note that all the data
* structures we just built are lost in the commit. The only data we
* keep past here are the relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish. This
* cannot block, even if someone else is waiting for access, because we
* already have the same lock within our transaction.
* cannot block, even if someone else is waiting for access, because
* we already have the same lock within our transaction.
*/
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock);
@ -1391,23 +1391,23 @@ index_drop(Oid indexId, bool concurrent)
StartTransactionCommand();
/*
* Now we must wait until no running transaction could have the table open
* with the old list of indexes. To do this, inquire which xacts
* currently would conflict with AccessExclusiveLock on the table -- ie,
* which ones have a lock of any kind on the table. Then wait for each of
* these xacts to commit or abort. Note we do not need to worry about
* xacts that open the table for writing after this point; they will see
* the index as invalid when they open the relation.
* Now we must wait until no running transaction could have the table
* open with the old list of indexes. To do this, inquire which xacts
* currently would conflict with AccessExclusiveLock on the table --
* ie, which ones have a lock of any kind on the table. Then wait for
* each of these xacts to commit or abort. Note we do not need to
* worry about xacts that open the table for writing after this point;
* they will see the index as invalid when they open the relation.
*
* Note: the reason we use actual lock acquisition here, rather than just
* checking the ProcArray and sleeping, is that deadlock is possible if
* one of the transactions in question is blocked trying to acquire an
* exclusive lock on our table. The lock code will detect deadlock and
* error out properly.
* Note: the reason we use actual lock acquisition here, rather than
* just checking the ProcArray and sleeping, is that deadlock is
* possible if one of the transactions in question is blocked trying
* to acquire an exclusive lock on our table. The lock code will
* detect deadlock and error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
* check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything more.
* Note: GetLockConflicts() never reports our own xid, hence we need
* not check for that. Also, prepared xacts are not reported, which
* is fine since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
@ -1786,7 +1786,7 @@ index_update_stats(Relation rel,
if (rd_rel->relkind != RELKIND_INDEX)
relallvisible = visibilitymap_count(rel);
else /* don't bother for indexes */
else /* don't bother for indexes */
relallvisible = 0;
if (rd_rel->relpages != (int32) relpages)

View File

@ -226,7 +226,7 @@ Datum pg_is_other_temp_schema(PG_FUNCTION_ARGS);
Oid
RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
bool missing_ok, bool nowait,
RangeVarGetRelidCallback callback, void *callback_arg)
RangeVarGetRelidCallback callback, void *callback_arg)
{
uint64 inval_count;
Oid relId;
@ -247,20 +247,20 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
}
/*
* DDL operations can change the results of a name lookup. Since all
* such operations will generate invalidation messages, we keep track
* of whether any such messages show up while we're performing the
* operation, and retry until either (1) no more invalidation messages
* show up or (2) the answer doesn't change.
* DDL operations can change the results of a name lookup. Since all such
* operations will generate invalidation messages, we keep track of
* whether any such messages show up while we're performing the operation,
* and retry until either (1) no more invalidation messages show up or (2)
* the answer doesn't change.
*
* But if lockmode = NoLock, then we assume that either the caller is OK
* with the answer changing under them, or that they already hold some
* appropriate lock, and therefore return the first answer we get without
* checking for invalidation messages. Also, if the requested lock is
* checking for invalidation messages. Also, if the requested lock is
* already held, LockRelationOid will not AcceptInvalidationMessages,
* so we may fail to notice a change. We could protect against that case
* by calling AcceptInvalidationMessages() before beginning this loop,
* but that would add a significant amount of overhead, so for now we don't.
* by calling AcceptInvalidationMessages() before beginning this loop, but
* that would add a significant amount of overhead, so for now we don't.
*/
for (;;)
{
@ -282,17 +282,18 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
if (relation->relpersistence == RELPERSISTENCE_TEMP)
{
if (!OidIsValid(myTempNamespace))
relId = InvalidOid; /* this probably can't happen? */
relId = InvalidOid; /* this probably can't happen? */
else
{
if (relation->schemaname)
{
Oid namespaceId;
Oid namespaceId;
namespaceId = LookupExplicitNamespace(relation->schemaname);
if (namespaceId != myTempNamespace)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("temporary tables cannot specify a schema name")));
errmsg("temporary tables cannot specify a schema name")));
}
relId = get_relname_relid(relation->relname, myTempNamespace);
@ -315,12 +316,12 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/*
* Invoke caller-supplied callback, if any.
*
* This callback is a good place to check permissions: we haven't taken
* the table lock yet (and it's really best to check permissions before
* locking anything!), but we've gotten far enough to know what OID we
* think we should lock. Of course, concurrent DDL might change things
* while we're waiting for the lock, but in that case the callback will
* be invoked again for the new OID.
* This callback is a good place to check permissions: we haven't
* taken the table lock yet (and it's really best to check permissions
* before locking anything!), but we've gotten far enough to know what
* OID we think we should lock. Of course, concurrent DDL might
* change things while we're waiting for the lock, but in that case
* the callback will be invoked again for the new OID.
*/
if (callback)
callback(relation, relId, oldRelId, callback_arg);
@ -328,21 +329,21 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/*
* If no lock requested, we assume the caller knows what they're
* doing. They should have already acquired a heavyweight lock on
* this relation earlier in the processing of this same statement,
* so it wouldn't be appropriate to AcceptInvalidationMessages()
* here, as that might pull the rug out from under them.
* this relation earlier in the processing of this same statement, so
* it wouldn't be appropriate to AcceptInvalidationMessages() here, as
* that might pull the rug out from under them.
*/
if (lockmode == NoLock)
break;
/*
* If, upon retry, we get back the same OID we did last time, then
* the invalidation messages we processed did not change the final
* answer. So we're done.
* If, upon retry, we get back the same OID we did last time, then the
* invalidation messages we processed did not change the final answer.
* So we're done.
*
* If we got a different OID, we've locked the relation that used to
* have this name rather than the one that does now. So release
* the lock.
* have this name rather than the one that does now. So release the
* lock.
*/
if (retry)
{
@ -384,8 +385,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
break;
/*
* Something may have changed. Let's repeat the name lookup, to
* make sure this name still references the same relation it did
* Something may have changed. Let's repeat the name lookup, to make
* sure this name still references the same relation it did
* previously.
*/
retry = true;
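
Editor's note: this loop, like the analogous one in objectaddress.c further down, follows a snapshot-compare-retry pattern around the invalidation counter. A self-contained toy (OIDs and counters are plain ints; the real comparison uses SharedInvalidMessageCounter):

#include <stdio.h>

static unsigned inval_counter;      /* bumped by every "DDL" in this model */
static int      bound_oid = 1111;   /* what the name currently resolves to */

static int  lookup(const char *name) { (void) name; return bound_oid; }
static void lock_rel(int oid)   { (void) oid; } /* may process invalidations */
static void unlock_rel(int oid) { (void) oid; }

/* Re-run the lookup until either no invalidation arrived while locking,
 * or the answer stops changing. */
static int lookup_and_lock(const char *name)
{
    int prev = -1;
    int have_lock = 0;

    for (;;)
    {
        unsigned snap = inval_counter;
        int oid = lookup(name);

        if (have_lock && prev != oid)
            unlock_rel(prev);       /* we had locked a stale relation */
        if (!have_lock || prev != oid)
            lock_rel(oid);
        have_lock = 1;

        if (snap == inval_counter || oid == prev)
            return oid;             /* no race, or the answer is stable */
        prev = oid;
    }
}

int main(void)
{
    printf("locked oid %d\n", lookup_and_lock("some_table"));
    return 0;
}
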
@ -550,8 +551,8 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation,
relid = InvalidOid;
/*
* In bootstrap processing mode, we don't bother with permissions
* or locking. Permissions might not be working yet, and locking is
* In bootstrap processing mode, we don't bother with permissions or
* locking. Permissions might not be working yet, and locking is
* unnecessary.
*/
if (IsBootstrapProcessingMode())

View File

@ -75,10 +75,10 @@
*/
typedef struct
{
Oid class_oid; /* oid of catalog */
Oid oid_index_oid; /* oid of index on system oid column */
int oid_catcache_id; /* id of catcache on system oid column */
AttrNumber attnum_namespace; /* attnum of namespace field */
Oid class_oid; /* oid of catalog */
Oid oid_index_oid; /* oid of index on system oid column */
int oid_catcache_id; /* id of catcache on system oid column */
AttrNumber attnum_namespace; /* attnum of namespace field */
} ObjectPropertyType;
static ObjectPropertyType ObjectProperty[] =
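
Editor's note: the table's entries are elided by the hunk, but get_object_property_data() in the final hunk of this file does a linear search over it and elogs on a miss. A standalone miniature of that shape (the payload field and sample OIDs are purely illustrative):

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    unsigned    class_oid;  /* key: oid of catalog */
    const char *name;       /* payload: whatever is tracked per catalog */
} PropertyRow;

static const PropertyRow props[] = {
    {1259, "pg_class"},
    {1255, "pg_proc"},
    {1247, "pg_type"},
};

static const PropertyRow *
get_property(unsigned class_oid)
{
    for (size_t i = 0; i < sizeof(props) / sizeof(props[0]); i++)
        if (props[i].class_oid == class_oid)
            return &props[i];
    fprintf(stderr, "unrecognized class id: %u\n", class_oid);
    exit(1);                /* the real code elogs ERROR */
}

int main(void)
{
    printf("%s\n", get_property(1255)->name);
    return 0;
}
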
@ -286,13 +286,13 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
for (;;)
{
/*
* Remember this value, so that, after looking up the object name
* and locking it, we can check whether any invalidation messages
* have been processed that might require a do-over.
* Remember this value, so that, after looking up the object name and
* locking it, we can check whether any invalidation messages have
* been processed that might require a do-over.
*/
inval_count = SharedInvalidMessageCounter;
/* Look up object address. */
/* Look up object address. */
switch (objtype)
{
case OBJECT_INDEX:
@ -367,7 +367,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
case OBJECT_OPCLASS:
case OBJECT_OPFAMILY:
address = get_object_address_opcf(objtype,
objname, objargs, missing_ok);
objname, objargs, missing_ok);
break;
case OBJECT_LARGEOBJECT:
Assert(list_length(objname) == 1);
@ -377,10 +377,10 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
if (!LargeObjectExists(address.objectId))
{
if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("large object %u does not exist",
address.objectId)));
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("large object %u does not exist",
address.objectId)));
}
break;
case OBJECT_CAST:
@ -475,8 +475,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* At this point, we've resolved the name to an OID and locked the
* corresponding database object. However, it's possible that by the
* time we acquire the lock on the object, concurrent DDL has modified
* the database in such a way that the name we originally looked up
* no longer resolves to that OID.
* the database in such a way that the name we originally looked up no
* longer resolves to that OID.
*
* We can be certain that this isn't an issue if (a) no shared
* invalidation messages have been processed or (b) we've locked a
@ -488,12 +488,12 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* the relation, which is enough to freeze out any concurrent DDL.
*
* In all other cases, however, it's possible that the name we looked
* up no longer refers to the object we locked, so we retry the
* lookup and see whether we get the same answer.
* up no longer refers to the object we locked, so we retry the lookup
* and see whether we get the same answer.
*/
if (inval_count == SharedInvalidMessageCounter || relation != NULL)
break;
old_address = address;
if (inval_count == SharedInvalidMessageCounter || relation != NULL)
break;
old_address = address;
}
/* Return the object address and the relation. */
@ -621,7 +621,7 @@ get_relation_by_qualified_name(ObjectType objtype, List *objname,
bool missing_ok)
{
Relation relation;
ObjectAddress address;
ObjectAddress address;
address.classId = RelationRelationId;
address.objectId = InvalidOid;
@ -721,8 +721,8 @@ get_object_address_relobject(ObjectType objtype, List *objname,
address.objectSubId = 0;
/*
* Caller is expecting to get back the relation, even though we
* didn't end up using it to find the rule.
* Caller is expecting to get back the relation, even though we didn't
* end up using it to find the rule.
*/
if (OidIsValid(address.objectId))
relation = heap_open(reloid, AccessShareLock);
@ -768,7 +768,7 @@ get_object_address_relobject(ObjectType objtype, List *objname,
if (!OidIsValid(address.objectId))
{
heap_close(relation, AccessShareLock);
relation = NULL; /* department of accident prevention */
relation = NULL; /* department of accident prevention */
return address;
}
}
@ -834,9 +834,10 @@ static ObjectAddress
get_object_address_type(ObjectType objtype,
List *objname, bool missing_ok)
{
ObjectAddress address;
ObjectAddress address;
TypeName *typename;
Type tup;
Type tup;
typename = makeTypeNameFromNameList(objname);
address.classId = TypeRelationId;
@ -1083,7 +1084,7 @@ get_object_namespace(const ObjectAddress *address)
HeapTuple tuple;
bool isnull;
Oid oid;
ObjectPropertyType *property;
ObjectPropertyType *property;
/* If not owned by a namespace, just return InvalidOid. */
property = get_object_property_data(address->classId);
@ -1122,5 +1123,5 @@ get_object_property_data(Oid class_id)
return &ObjectProperty[index];
elog(ERROR, "unrecognized class id: %u", class_id);
return NULL; /* not reached */
return NULL; /* not reached */
}

View File

@ -831,8 +831,8 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok)
if (OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("domain \"%s\" has multiple constraints named \"%s\"",
format_type_be(typid), conname)));
errmsg("domain \"%s\" has multiple constraints named \"%s\"",
format_type_be(typid), conname)));
conOid = HeapTupleGetOid(tuple);
}
}

View File

@ -150,7 +150,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object,
/* Only need to check for existing membership if isReplace */
if (isReplace)
{
Oid oldext;
Oid oldext;
oldext = getExtensionOfObject(object->classId, object->objectId);
if (OidIsValid(oldext))

View File

@ -228,7 +228,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
* is polymorphic. ANYRANGE return type is even stricter: must have an
* is polymorphic. ANYRANGE return type is even stricter: must have an
* ANYRANGE input (since we can't deduce the specific range type from
* ANYELEMENT). Also, do not allow return type INTERNAL unless at least
* one input argument is INTERNAL.

View File

@ -1287,7 +1287,7 @@ shdepReassignOwned(List *roleids, Oid newrole)
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot reassign ownership of objects owned by %s because they are required by the database system",
getObjectDescription(&obj))));
getObjectDescription(&obj))));
/*
* There's no need to tell the whole truth, which is that we

View File

@ -500,8 +500,8 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* Forcibly create relation if it doesn't exist (which suggests that
* it was dropped somewhere later in the WAL sequence). As in
* XLogReadBuffer, we prefer to recreate the rel and replay the log
* as best we can until the drop is seen.
* XLogReadBuffer, we prefer to recreate the rel and replay the log as
* best we can until the drop is seen.
*/
smgrcreate(reln, MAIN_FORKNUM, true);

View File

@ -96,11 +96,11 @@ static void compute_index_stats(Relation onerel, double totalrows,
MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum,
Node *index_expr);
static int acquire_sample_rows(Relation onerel, int elevel,
static int acquire_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
static int compare_rows(const void *a, const void *b);
static int acquire_inherited_sample_rows(Relation onerel, int elevel,
static int acquire_inherited_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
static void update_attstats(Oid relid, bool inh,
@ -118,7 +118,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
Relation onerel;
int elevel;
AcquireSampleRowsFunc acquirefunc = NULL;
BlockNumber relpages = 0;
BlockNumber relpages = 0;
/* Select logging level */
if (vacstmt->options & VACOPT_VERBOSE)
@ -205,8 +205,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
}
/*
* Check that it's a plain table or foreign table; we used to do this
* in get_rel_oids() but seems safer to check after we've locked the
* Check that it's a plain table or foreign table; we used to do this in
* get_rel_oids() but seems safer to check after we've locked the
* relation.
*/
if (onerel->rd_rel->relkind == RELKIND_RELATION)
@ -235,8 +235,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
if (!ok)
{
ereport(WARNING,
(errmsg("skipping \"%s\" --- cannot analyze this foreign table",
RelationGetRelationName(onerel))));
(errmsg("skipping \"%s\" --- cannot analyze this foreign table",
RelationGetRelationName(onerel))));
relation_close(onerel, ShareUpdateExclusiveLock);
return;
}
@ -464,8 +464,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Determine how many rows we need to sample, using the worst case from
* all analyzable columns. We use a lower bound of 100 rows to avoid
* possible overflow in Vitter's algorithm. (Note: that will also be
* the target in the corner case where there are no analyzable columns.)
* possible overflow in Vitter's algorithm. (Note: that will also be the
* target in the corner case where there are no analyzable columns.)
*/
targrows = 100;
for (i = 0; i < attr_cnt; i++)
@ -1337,7 +1337,7 @@ anl_get_next_S(double t, int n, double *stateptr)
double V,
quot;
V = anl_random_fract(); /* Generate V */
V = anl_random_fract(); /* Generate V */
S = 0;
t += 1;
/* Note: "num" in Vitter's code is always equal to t - n */
@ -1398,7 +1398,7 @@ anl_get_next_S(double t, int n, double *stateptr)
y *= numer / denom;
denom -= 1;
}
W = exp(-log(anl_random_fract()) / n); /* Generate W in advance */
W = exp(-log(anl_random_fract()) / n); /* Generate W in advance */
if (exp(log(y) / n) <= (t + X) / t)
break;
}
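
Editor's note: anl_get_next_S() implements the skip-distance computation from Vitter's Algorithm Z: rather than drawing a random number per row, it draws S, the number of rows to skip before the next sample candidate (the W = exp(-log(V)/n) line above is part of that derivation). The simpler Algorithm R below yields the same uniform reservoir sample with one draw per row; a self-contained sketch:

#include <stdio.h>
#include <stdlib.h>

#define TARGROWS 10         /* sample size; illustrative only */

int main(void)
{
    int  sample[TARGROWS];
    long seen = 0;

    srand(42);
    for (int row = 1; row <= 1000; row++)   /* stand-in for the table scan */
    {
        if (seen < TARGROWS)
            sample[seen] = row;             /* fill the reservoir first */
        else
        {
            long k = rand() % (seen + 1);   /* modulo bias is fine for a sketch */
            if (k < TARGROWS)
                sample[k] = row;            /* replace a reservoir slot */
        }
        seen++;
    }
    for (int i = 0; i < TARGROWS; i++)
        printf("%d ", sample[i]);
    putchar('\n');
    return 0;
}

Algorithm Z earns its complexity by touching only the selected rows, which matters when sampling a large heap.
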

View File

@ -594,10 +594,10 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
* Note that the NewHeap will not
* receive any of the defaults or constraints associated with the OldHeap;
* we don't need 'em, and there's no reason to spend cycles inserting them
* into the catalogs only to delete them.
* Note that the NewHeap will not receive any of the defaults or
* constraints associated with the OldHeap; we don't need 'em, and there's
* no reason to spend cycles inserting them into the catalogs only to
* delete them.
*/
/*

View File

@ -150,7 +150,7 @@ typedef struct CopyStateData
Oid *typioparams; /* array of element types for in_functions */
int *defmap; /* array of default att numbers */
ExprState **defexprs; /* array of default att expressions */
bool volatile_defexprs; /* is any of defexprs volatile? */
bool volatile_defexprs; /* is any of defexprs volatile? */
/*
* These variables are used to reduce overhead in textual COPY FROM.
@ -566,11 +566,11 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on client connection with an open transaction")));
errmsg("unexpected EOF on client connection with an open transaction")));
if (pq_getmessage(cstate->fe_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("unexpected EOF on client connection with an open transaction")));
errmsg("unexpected EOF on client connection with an open transaction")));
switch (mtype)
{
case 'd': /* CopyData */
@ -1861,6 +1861,7 @@ CopyFrom(CopyState cstate)
uint64 processed = 0;
bool useHeapMultiInsert;
int nBufferedTuples = 0;
#define MAX_BUFFERED_TUPLES 1000
HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */
Size bufferedTuplesSize = 0;
@ -1968,8 +1969,8 @@ CopyFrom(CopyState cstate)
* processed and prepared for insertion are not there.
*/
if ((resultRelInfo->ri_TrigDesc != NULL &&
(resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) ||
(resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) ||
cstate->volatile_defexprs)
{
useHeapMultiInsert = false;
@ -2162,8 +2163,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
int i;
/*
* heap_multi_insert leaks memory, so switch to short-lived memory
* context before calling it.
* heap_multi_insert leaks memory, so switch to short-lived memory context
* before calling it.
*/
oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
heap_multi_insert(cstate->rel,
@ -2175,14 +2176,14 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
MemoryContextSwitchTo(oldcontext);
/*
* If there are any indexes, update them for all the inserted tuples,
* and run AFTER ROW INSERT triggers.
* If there are any indexes, update them for all the inserted tuples, and
* run AFTER ROW INSERT triggers.
*/
if (resultRelInfo->ri_NumIndices > 0)
{
for (i = 0; i < nBufferedTuples; i++)
{
List *recheckIndexes;
List *recheckIndexes;
ExecStoreTuple(bufferedTuples[i], myslot, InvalidBuffer, false);
recheckIndexes =
@ -2194,6 +2195,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
list_free(recheckIndexes);
}
}
/*
* There are no indexes, but see if we need to run AFTER ROW INSERT triggers
* anyway.
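
Editor's note: these hunks belong to COPY's batched-insert path: rows accumulate in a buffer of up to MAX_BUFFERED_TUPLES and are flushed through heap_multi_insert() in one call, with batching disabled when BEFORE/INSTEAD ROW triggers or volatile default expressions must see each row as it lands. A toy of the buffering shape (tuples reduced to ints, names hypothetical):

#include <stdio.h>

#define MAX_BUFFERED 5      /* copy.c uses MAX_BUFFERED_TUPLES = 1000 */

static int buffered[MAX_BUFFERED];
static int nbuffered = 0;

/* Stand-in for CopyFromInsertBatch(): insert all buffered rows at once. */
static void flush_batch(void)
{
    if (nbuffered == 0)
        return;
    printf("multi-insert of %d rows:", nbuffered);
    for (int i = 0; i < nbuffered; i++)
        printf(" %d", buffered[i]);
    putchar('\n');
    nbuffered = 0;
}

int main(void)
{
    for (int row = 1; row <= 12; row++)
    {
        buffered[nbuffered++] = row;
        if (nbuffered >= MAX_BUFFERED)
            flush_batch();  /* amortizes per-insert overhead */
    }
    flush_batch();          /* final partial batch */
    return 0;
}
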

View File

@ -62,12 +62,12 @@ void
ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
ParamListInfo params, char *completionTag)
{
Query *query = (Query *) stmt->query;
Query *query = (Query *) stmt->query;
IntoClause *into = stmt->into;
DestReceiver *dest;
List *rewritten;
List *rewritten;
PlannedStmt *plan;
QueryDesc *queryDesc;
QueryDesc *queryDesc;
ScanDirection dir;
/*
@ -98,9 +98,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
* a preliminary copy of the source querytree. This prevents problems in
* a preliminary copy of the source querytree. This prevents problems in
* the case that CTAS is in a portal or plpgsql function and is executed
* repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
* repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
*/
rewritten = QueryRewrite((Query *) copyObject(stmt->query));
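
Editor's note: the copyObject() call above is the whole trick: QueryRewrite() scribbles on its input, so a portal or PL function re-executing the statement must start from a fresh copy each time. A standalone illustration of the pattern (a small struct stands in for the query tree):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { char text[32]; } Query;

static void rewrite(Query *q)           /* scribbles on its argument */
{
    strncat(q->text, " [rewritten]", sizeof(q->text) - strlen(q->text) - 1);
}

static Query *copy_query(const Query *q)    /* cf. copyObject() */
{
    Query *c = malloc(sizeof(Query));
    *c = *q;
    return c;
}

int main(void)
{
    Query original = {"SELECT 1"};

    for (int run = 0; run < 2; run++)   /* e.g. re-executed from a portal */
    {
        Query *work = copy_query(&original);
        rewrite(work);
        printf("run %d: %s (source still: %s)\n", run, work->text, original.text);
        free(work);
    }
    return 0;
}
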
@ -115,10 +115,10 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/*
* Use a snapshot with an updated command ID to ensure this query sees
* results of any previously executed queries. (This could only matter
* if the planner executed an allegedly-stable function that changed
* the database contents, but let's do it anyway to be parallel to the
* EXPLAIN code path.)
* results of any previously executed queries. (This could only matter if
* the planner executed an allegedly-stable function that changed the
* database contents, but let's do it anyway to be parallel to the EXPLAIN
* code path.)
*/
PushCopiedSnapshot(GetActiveSnapshot());
UpdateActiveSnapshotCommandId();
@ -211,12 +211,12 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
DR_intorel *myState = (DR_intorel *) self;
IntoClause *into = myState->into;
CreateStmt *create;
Oid intoRelationId;
Relation intoRelationDesc;
Oid intoRelationId;
Relation intoRelationDesc;
RangeTblEntry *rte;
Datum toast_options;
ListCell *lc;
int attnum;
ListCell *lc;
int attnum;
static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
Assert(into != NULL); /* else somebody forgot to set it */
@ -237,8 +237,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
create->if_not_exists = false;
/*
* Build column definitions using "pre-cooked" type and collation info.
* If a column name list was specified in CREATE TABLE AS, override the
* Build column definitions using "pre-cooked" type and collation info. If
* a column name list was specified in CREATE TABLE AS, override the
* column names derived from the query. (Too few column names are OK, too
* many are not.)
*/
@ -246,8 +246,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
for (attnum = 0; attnum < typeinfo->natts; attnum++)
{
Form_pg_attribute attribute = typeinfo->attrs[attnum];
ColumnDef *col = makeNode(ColumnDef);
TypeName *coltype = makeNode(TypeName);
ColumnDef *col = makeNode(ColumnDef);
TypeName *coltype = makeNode(TypeName);
if (lc)
{
@ -280,9 +280,9 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
/*
* It's possible that the column is of a collatable type but the
* collation could not be resolved, so double-check. (We must
* check this here because DefineRelation would adopt the type's
* default collation rather than complaining.)
* collation could not be resolved, so double-check. (We must check
* this here because DefineRelation would adopt the type's default
* collation rather than complaining.)
*/
if (!OidIsValid(col->collOid) &&
type_is_collatable(coltype->typeOid))
@ -297,8 +297,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
if (lc != NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("CREATE TABLE AS specifies too many column names")));
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("CREATE TABLE AS specifies too many column names")));
/*
* Actually create the target table
@ -342,7 +342,7 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
for (attnum = 1; attnum <= intoRelationDesc->rd_att->natts; attnum++)
rte->modifiedCols = bms_add_member(rte->modifiedCols,
attnum - FirstLowInvalidHeapAttributeNumber);
attnum - FirstLowInvalidHeapAttributeNumber);
ExecCheckRTPerms(list_make1(rte), true);

View File

@ -695,8 +695,8 @@ check_encoding_locale_matches(int encoding, const char *collate, const char *cty
errmsg("encoding \"%s\" does not match locale \"%s\"",
pg_encoding_to_char(encoding),
ctype),
errdetail("The chosen LC_CTYPE setting requires encoding \"%s\".",
pg_encoding_to_char(ctype_encoding))));
errdetail("The chosen LC_CTYPE setting requires encoding \"%s\".",
pg_encoding_to_char(ctype_encoding))));
if (!(collate_encoding == encoding ||
collate_encoding == PG_SQL_ASCII ||
@ -710,8 +710,8 @@ check_encoding_locale_matches(int encoding, const char *collate, const char *cty
errmsg("encoding \"%s\" does not match locale \"%s\"",
pg_encoding_to_char(encoding),
collate),
errdetail("The chosen LC_COLLATE setting requires encoding \"%s\".",
pg_encoding_to_char(collate_encoding))));
errdetail("The chosen LC_COLLATE setting requires encoding \"%s\".",
pg_encoding_to_char(collate_encoding))));
}
/* Error cleanup callback for createdb */
@ -784,7 +784,8 @@ dropdb(const char *dbname, bool missing_ok)
/* DROP hook for the database being removed */
if (object_access_hook)
{
ObjectAccessDrop drop_arg;
ObjectAccessDrop drop_arg;
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP,
DatabaseRelationId, db_id, 0, &drop_arg);
@ -831,8 +832,7 @@ dropdb(const char *dbname, bool missing_ok)
ReleaseSysCache(tup);
/*
* Delete any comments or security labels associated with
* the database.
* Delete any comments or security labels associated with the database.
*/
DeleteSharedComments(db_id, DatabaseRelationId);
DeleteSharedSecurityLabel(db_id, DatabaseRelationId);
@ -860,18 +860,18 @@ dropdb(const char *dbname, bool missing_ok)
pgstat_drop_database(db_id);
/*
* Tell checkpointer to forget any pending fsync and unlink requests for files
* in the database; else the fsyncs will fail at next checkpoint, or
* Tell checkpointer to forget any pending fsync and unlink requests for
* files in the database; else the fsyncs will fail at next checkpoint, or
* worse, it will delete files that belong to a newly created database
* with the same OID.
*/
ForgetDatabaseFsyncRequests(db_id);
/*
* Force a checkpoint to make sure the checkpointer has received the message
* sent by ForgetDatabaseFsyncRequests. On Windows, this also ensures that
* background procs don't hold any open files, which would cause rmdir() to
* fail.
* Force a checkpoint to make sure the checkpointer has received the
* message sent by ForgetDatabaseFsyncRequests. On Windows, this also
* ensures that background procs don't hold any open files, which would
* cause rmdir() to fail.
*/
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
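
Editor's note: a toy of the ordering these two comments describe: first drop the dead database's pending fsync requests, then force a "checkpoint" so nothing referencing its files stays queued (paths and names are made up):

#include <stdio.h>

#define MAX_REQ 8

static struct { unsigned db; const char *path; } queue[MAX_REQ] = {
    {1, "base/1/16384"}, {2, "base/2/16385"}, {1, "base/1/16386"},
};
static int nreq = 3;

/* Stand-in for ForgetDatabaseFsyncRequests(): drop requests for the
 * database being removed, so later fsyncs don't hit unlinked files. */
static void forget_database_fsyncs(unsigned db)
{
    int keep = 0;
    for (int i = 0; i < nreq; i++)
        if (queue[i].db != db)
            queue[keep++] = queue[i];
    nreq = keep;
}

static void checkpoint(void)
{
    for (int i = 0; i < nreq; i++)
        printf("fsync %s\n", queue[i].path);
    nreq = 0;
}

int main(void)
{
    forget_database_fsyncs(2);  /* must precede the forced checkpoint */
    checkpoint();
    return 0;
}
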

View File

@ -30,7 +30,7 @@
#include "utils/syscache.h"
static void does_not_exist_skipping(ObjectType objtype,
List *objname, List *objargs);
List *objname, List *objargs);
/*
* Drop one or more objects.
@ -54,7 +54,7 @@ RemoveObjects(DropStmt *stmt)
foreach(cell1, stmt->objects)
{
ObjectAddress address;
ObjectAddress address;
List *objname = lfirst(cell1);
List *objargs = NIL;
Relation relation = NULL;
@ -97,8 +97,8 @@ RemoveObjects(DropStmt *stmt)
if (((Form_pg_proc) GETSTRUCT(tup))->proisagg)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(objname)),
errmsg("\"%s\" is an aggregate function",
NameListToString(objname)),
errhint("Use DROP AGGREGATE to drop aggregate functions.")));
ReleaseSysCache(tup);
@ -149,7 +149,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
break;
case OBJECT_CONVERSION:
msg = gettext_noop("conversion \"%s\" does not exist, skipping");
name = NameListToString(objname);
name = NameListToString(objname);
break;
case OBJECT_SCHEMA:
msg = gettext_noop("schema \"%s\" does not exist, skipping");
@ -196,9 +196,9 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
case OBJECT_CAST:
msg = gettext_noop("cast from type %s to type %s does not exist, skipping");
name = format_type_be(typenameTypeId(NULL,
(TypeName *) linitial(objname)));
(TypeName *) linitial(objname)));
args = format_type_be(typenameTypeId(NULL,
(TypeName *) linitial(objargs)));
(TypeName *) linitial(objargs)));
break;
case OBJECT_TRIGGER:
msg = gettext_noop("trigger \"%s\" for table \"%s\" does not exist, skipping");
@ -231,7 +231,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
args = strVal(linitial(objargs));
break;
default:
elog(ERROR, "unexpected object type (%d)", (int)objtype);
elog(ERROR, "unexpected object type (%d)", (int) objtype);
break;
}

View File

@ -117,7 +117,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
TupOutputState *tstate;
List *rewritten;
ListCell *lc;
bool timing_set = false;
bool timing_set = false;
/* Initialize ExplainState. */
ExplainInitState(&es);
@ -169,7 +169,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("EXPLAIN option BUFFERS requires ANALYZE")));
/* if the timing was not set explicitly, set default value */
es.timing = (timing_set) ? es.timing : es.analyze;
@ -340,9 +340,9 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
if (IsA(utilityStmt, CreateTableAsStmt))
{
/*
* We have to rewrite the contained SELECT and then pass it back
* to ExplainOneQuery. It's probably not really necessary to copy
* the contained parsetree another time, but let's be safe.
* We have to rewrite the contained SELECT and then pass it back to
* ExplainOneQuery. It's probably not really necessary to copy the
* contained parsetree another time, but let's be safe.
*/
CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt;
List *rewritten;
@ -1021,7 +1021,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
{
if (planstate->instrument->need_timer)
appendStringInfo(es->str,
" (actual time=%.3f..%.3f rows=%.0f loops=%.0f)",
" (actual time=%.3f..%.3f rows=%.0f loops=%.0f)",
startup_sec, total_sec, rows, nloops);
else
appendStringInfo(es->str,
@ -1095,7 +1095,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
planstate, es);
if (es->analyze)
ExplainPropertyLong("Heap Fetches",
((IndexOnlyScanState *) planstate)->ioss_HeapFetches, es);
((IndexOnlyScanState *) planstate)->ioss_HeapFetches, es);
break;
case T_BitmapIndexScan:
show_scan_qual(((BitmapIndexScan *) plan)->indexqualorig,
@ -1237,7 +1237,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
bool has_temp = (usage->temp_blks_read > 0 ||
usage->temp_blks_written > 0);
bool has_timing = (!INSTR_TIME_IS_ZERO(usage->blk_read_time) ||
!INSTR_TIME_IS_ZERO(usage->blk_write_time));
!INSTR_TIME_IS_ZERO(usage->blk_write_time));
/* Show only positive counter values. */
if (has_shared || has_local || has_temp)
@ -1301,10 +1301,10 @@ ExplainNode(PlanState *planstate, List *ancestors,
appendStringInfoString(es->str, "I/O Timings:");
if (!INSTR_TIME_IS_ZERO(usage->blk_read_time))
appendStringInfo(es->str, " read=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->blk_read_time));
INSTR_TIME_GET_MILLISEC(usage->blk_read_time));
if (!INSTR_TIME_IS_ZERO(usage->blk_write_time))
appendStringInfo(es->str, " write=%0.3f",
INSTR_TIME_GET_MILLISEC(usage->blk_write_time));
INSTR_TIME_GET_MILLISEC(usage->blk_write_time));
appendStringInfoChar(es->str, '\n');
}
}

View File

@ -899,8 +899,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
{
t_sql = DirectFunctionCall3(replace_text,
t_sql,
CStringGetTextDatum("MODULE_PATHNAME"),
CStringGetTextDatum(control->module_pathname));
CStringGetTextDatum("MODULE_PATHNAME"),
CStringGetTextDatum(control->module_pathname));
}
/* And now back to C string */
@ -1585,14 +1585,14 @@ RemoveExtensionById(Oid extId)
* might write "DROP EXTENSION foo" in foo's own script files, as because
* errors in dependency management in extension script files could give
* rise to cases where an extension is dropped as a result of recursing
* from some contained object. Because of that, we must test for the case
* from some contained object. Because of that, we must test for the case
* here, not at some higher level of the DROP EXTENSION command.
*/
if (extId == CurrentExtensionObject)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot drop extension \"%s\" because it is being modified",
get_extension_name(extId))));
errmsg("cannot drop extension \"%s\" because it is being modified",
get_extension_name(extId))));
rel = heap_open(ExtensionRelationId, RowExclusiveLock);

View File

@ -166,7 +166,7 @@ transformGenericOptions(Oid catalogId,
if (OidIsValid(fdwvalidator))
{
Datum valarg = result;
Datum valarg = result;
/*
* Pass a null options list as an empty array, so that validators
@ -215,13 +215,13 @@ RenameForeignDataWrapper(const char *oldname, const char *newname)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("foreign-data wrapper \"%s\" does not exist", oldname)));
errmsg("foreign-data wrapper \"%s\" does not exist", oldname)));
/* make sure the new name doesn't exist */
if (SearchSysCacheExists1(FOREIGNDATAWRAPPERNAME, CStringGetDatum(newname)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("foreign-data wrapper \"%s\" already exists", newname)));
errmsg("foreign-data wrapper \"%s\" already exists", newname)));
/* must be owner of FDW */
if (!pg_foreign_data_wrapper_ownercheck(HeapTupleGetOid(tup), GetUserId()))
@ -364,7 +364,7 @@ AlterForeignDataWrapperOwner_oid(Oid fwdId, Oid newOwnerId)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("foreign-data wrapper with OID %u does not exist", fwdId)));
errmsg("foreign-data wrapper with OID %u does not exist", fwdId)));
AlterForeignDataWrapperOwner_internal(rel, tup, newOwnerId);

View File

@ -890,9 +890,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
ReleaseSysCache(languageTuple);
/*
* Only superuser is allowed to create leakproof functions because
* it possibly allows unprivileged users to reference invisible tuples
* to be filtered out using views for row-level security.
* Only superuser is allowed to create leakproof functions because it
* possibly allows unprivileged users to reference invisible tuples to be
* filtered out using views for row-level security.
*/
if (isLeakProof && !superuser())
ereport(ERROR,
@ -1320,7 +1320,7 @@ AlterFunction(AlterFunctionStmt *stmt)
if (intVal(leakproof_item->arg) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("only superuser can define a leakproof function")));
errmsg("only superuser can define a leakproof function")));
procForm->proleakproof = intVal(leakproof_item->arg);
}
if (cost_item)

View File

@ -96,7 +96,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
* concrete benefit for core types.
* When a comparison or exclusion operator has a polymorphic input type, the
* actual input types must also match. This defends against the possibility
* actual input types must also match. This defends against the possibility
* that operators could vary behavior in response to get_fn_expr_argtype().
* At present, this hazard is theoretical: check_exclusion_constraint() and
* all core index access methods decline to set fn_expr for such calls.
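
Editor's note: a self-contained miniature of that rule: when the operator class's declared input type is polymorphic, a change of the concrete column type invalidates the old index (type names are strings here purely for illustration):

#include <stdio.h>
#include <string.h>

typedef struct
{
    const char *opclass_input;  /* e.g. "anyarray" or a concrete type */
    const char *old_coltype;
    const char *new_coltype;
} IndexCol;

static int is_polymorphic(const char *t)
{
    return strncmp(t, "any", 3) == 0;   /* anyelement, anyarray, ... */
}

static int still_compatible(const IndexCol *c)
{
    if (is_polymorphic(c->opclass_input))
        return strcmp(c->old_coltype, c->new_coltype) == 0;
    return 1;                   /* concrete opcintype: not affected */
}

int main(void)
{
    IndexCol a = {"anyarray", "int4[]", "int8[]"};
    IndexCol b = {"int4", "int4", "int4"};
    printf("a: %s\n", still_compatible(&a) ? "keep index" : "rebuild");
    printf("b: %s\n", still_compatible(&b) ? "keep index" : "rebuild");
    return 0;
}
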
@ -134,6 +134,7 @@ CheckIndexCompatible(Oid oldId,
/* Caller should already have the relation locked in some way. */
relationId = RangeVarGetRelid(heapRelation, NoLock, false);
/*
* We can pretend isconstraint = false unconditionally. It only serves to
* decide the text of an error message that should never happen for us.
@ -157,10 +158,10 @@ CheckIndexCompatible(Oid oldId,
ReleaseSysCache(tuple);
/*
* Compute the operator classes, collations, and exclusion operators
* for the new index, so we can test whether it's compatible with the
* existing one. Note that ComputeIndexAttrs might fail here, but that's
* OK: DefineIndex would have called this function with the same arguments
* Compute the operator classes, collations, and exclusion operators for
* the new index, so we can test whether it's compatible with the existing
* one. Note that ComputeIndexAttrs might fail here, but that's OK:
* DefineIndex would have called this function with the same arguments
* later on, and it would have failed then anyway.
*/
indexInfo = makeNode(IndexInfo);
@ -218,11 +219,11 @@ CheckIndexCompatible(Oid oldId,
return false;
/* For polymorphic opcintype, column type changes break compatibility. */
irel = index_open(oldId, AccessShareLock); /* caller probably has a lock */
irel = index_open(oldId, AccessShareLock); /* caller probably has a lock */
for (i = 0; i < old_natts; i++)
{
if (IsPolymorphicType(get_opclass_input_type(classObjectId[i])) &&
irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
{
ret = false;
break;
@ -232,7 +233,8 @@ CheckIndexCompatible(Oid oldId,
/* Any change in exclusion operator selections breaks compatibility. */
if (ret && indexInfo->ii_ExclusionOps != NULL)
{
Oid *old_operators, *old_procs;
Oid *old_operators,
*old_procs;
uint16 *old_strats;
RelationGetExclusionInfo(irel, &old_operators, &old_procs, &old_strats);
@@ -249,7 +251,7 @@ CheckIndexCompatible(Oid oldId,
 			op_input_types(indexInfo->ii_ExclusionOps[i], &left, &right);
 			if ((IsPolymorphicType(left) || IsPolymorphicType(right)) &&
-				irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
+				irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
 			{
 				ret = false;
 				break;
@@ -1778,9 +1780,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
 		return;
 
 	/*
-	 * If the relation does exist, check whether it's an index. But note
-	 * that the relation might have been dropped between the time we did the
-	 * name lookup and now. In that case, there's nothing to do.
+	 * If the relation does exist, check whether it's an index. But note that
+	 * the relation might have been dropped between the time we did the name
+	 * lookup and now. In that case, there's nothing to do.
 	 */
 	relkind = get_rel_relkind(relId);
 	if (!relkind)
@@ -1798,9 +1800,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
 	if (relId != oldRelId)
 	{
 		/*
-		 * Lock level here should match reindex_index() heap lock.
-		 * If the OID isn't valid, it means the index was concurrently dropped,
-		 * which is not a problem for us; just return normally.
+		 * Lock level here should match reindex_index() heap lock. If the OID
+		 * isn't valid, it means the index was concurrently dropped, which is
+		 * not a problem for us; just return normally.
 		 */
 		*heapOid = IndexGetRelation(relId, true);
 		if (OidIsValid(*heapOid))
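
The hunk is cut off just as the callback switches its heap lock to the new candidate index's parent table. A plausible completion of that step, assuming the standard lmgr calls (LockRelationOid/UnlockRelationOid) and that IndexGetRelation's second argument is a missing_ok flag:

	if (relId != oldRelId)
	{
		/* release the lock on the previous candidate's heap, if any */
		if (OidIsValid(*heapOid))
			UnlockRelationOid(*heapOid, ShareLock);
		*heapOid = IndexGetRelation(relId, true);	/* true = missing_ok */
		if (OidIsValid(*heapOid))
			LockRelationOid(*heapOid, ShareLock);
	}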

View File

@@ -40,9 +40,9 @@ LockTableCommand(LockStmt *lockstmt)
 	/*
 	 * During recovery we only accept these variations: LOCK TABLE foo IN
-	 * ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo
-	 * IN ROW EXCLUSIVE MODE This test must match the restrictions defined
-	 * in LockAcquire()
+	 * ACCESS SHARE MODE LOCK TABLE foo IN ROW SHARE MODE LOCK TABLE foo IN
+	 * ROW EXCLUSIVE MODE This test must match the restrictions defined in
+	 * LockAcquire()
 	 */
 	if (lockstmt->mode > RowExclusiveLock)
		PreventCommandDuringRecovery("LOCK TABLE");
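
The single comparison works because lock modes are defined as increasing integers, so everything stronger than ROW EXCLUSIVE is rejected during recovery. As an illustration of the ordering this test relies on (numeric values as defined in storage/lock.h):

	/*
	 * AccessShareLock (1), RowShareLock (2), RowExclusiveLock (3): accepted.
	 * ShareUpdateExclusiveLock (4) through AccessExclusiveLock (8): rejected.
	 */
	if (lockstmt->mode > RowExclusiveLock)
		PreventCommandDuringRecovery("LOCK TABLE");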
@@ -74,15 +74,16 @@ static void
 RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid,
 							 void *arg)
 {
-	LOCKMODE	lockmode = * (LOCKMODE *) arg;
+	LOCKMODE	lockmode = *(LOCKMODE *) arg;
 	char		relkind;
 	AclResult	aclresult;
 
 	if (!OidIsValid(relid))
-		return;					/* doesn't exist, so no permissions check */
+		return;					/* doesn't exist, so no permissions check */
 	relkind = get_rel_relkind(relid);
 	if (!relkind)
-		return;					/* woops, concurrently dropped; no permissions check */
+		return;					/* woops, concurrently dropped; no permissions
+								 * check */
 
 	/* Currently, we only allow plain tables to be locked */
 	if (relkind != RELKIND_RELATION)
@@ -122,9 +123,10 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
 		if (aclresult != ACLCHECK_OK)
 		{
 			char	   *relname = get_rel_name(childreloid);
+
 			if (!relname)
-				continue;	/* child concurrently dropped, just skip it */
-			aclcheck_error(aclresult, ACL_KIND_CLASS, relname);
+				continue;		/* child concurrently dropped, just skip it */
+			aclcheck_error(aclresult, ACL_KIND_CLASS, relname);
 		}
 
 		/* We have enough rights to lock the relation; do so. */
@ -134,17 +136,18 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
{
/* try to throw error by name; relation could be deleted... */
char *relname = get_rel_name(childreloid);
if (!relname)
continue; /* child concurrently dropped, just skip it */
continue; /* child concurrently dropped, just skip it */
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on relation \"%s\"",
relname)));
relname)));
}
/*
* Even if we got the lock, child might have been concurrently dropped.
* If so, we can skip it.
* Even if we got the lock, child might have been concurrently
* dropped. If so, we can skip it.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childreloid)))
{
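
The tail of this hunk is truncated; presumably the block releases the now-useless lock and skips the vanished child, along these lines (UnlockRelationOid assumed as the counterpart of the lock taken just above):

		if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childreloid)))
		{
			/* child disappeared after we locked it; release and move on */
			UnlockRelationOid(childreloid, lockmode);
			continue;
		}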

View File

@@ -1167,7 +1167,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
 		if (procform->prorettype != INT4OID)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-				 errmsg("btree comparison procedures must return integer")));
+			errmsg("btree comparison procedures must return integer")));
 
 		/*
 		 * If lefttype/righttype isn't specified, use the proc's input
@@ -1188,7 +1188,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
 		if (procform->prorettype != VOIDOID)
 			ereport(ERROR,
 					(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-				 errmsg("btree sort support procedures must return void")));
+			errmsg("btree sort support procedures must return void")));
 
 		/*
 		 * Can't infer lefttype/righttype from proc, so use default rule
@ -1217,7 +1217,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
/*
* The default in CREATE OPERATOR CLASS is to use the class' opcintype as
* lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
* lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
* isn't available, so make the user specify the types.
*/
if (!OidIsValid(member->lefttype))
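
For orientation, the two errors above guard the btree support-procedure slots in assignProcTypes: slot 1 is the comparison function, slot 2 the sort support function. A sketch of the surrounding logic, assuming the slot-number macros BTORDER_PROC and BTSORTSUPPORT_PROC from access/nbtree.h:

	if (member->number == BTORDER_PROC)
	{
		/* btree comparison procs return int4: <0, 0, >0 */
		if (procform->prorettype != INT4OID)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
			errmsg("btree comparison procedures must return integer")));
	}
	else if (member->number == BTSORTSUPPORT_PROC)
	{
		/* btree sort support procs fill in a SortSupport struct, return void */
		if (procform->prorettype != VOIDOID)
			ereport(ERROR,
					(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
			errmsg("btree sort support procedures must return void")));
	}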

View File

@@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
  * ExecuteQuery --- implement the 'EXECUTE' utility statement.
  *
  * This code also supports CREATE TABLE ... AS EXECUTE. That case is
- * indicated by passing a non-null intoClause. The DestReceiver is already
+ * indicated by passing a non-null intoClause.  The DestReceiver is already
  * set up correctly for CREATE TABLE AS, but we still have to make a few
  * other adjustments here.
  *
@@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
 	{
 		/*
 		 * Need an EState to evaluate parameters; must not delete it till end
-		 * of query, in case parameters are pass-by-reference. Note that the
+		 * of query, in case parameters are pass-by-reference.  Note that the
 		 * passed-in "params" could possibly be referenced in the parameter
 		 * expressions.
 		 */
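
The pattern this comment describes is: build an EState whose lifetime spans the whole query, evaluate the parameter expressions into a ParamListInfo, and free the EState only at the end. Roughly, with EvaluateParams as prepare.c's own helper (its exact call shape here is an assumption):

	if (entry->plansource->num_params > 0)
	{
		EState	   *estate;

		estate = CreateExecutorState();
		estate->es_param_list_info = params;	/* expressions may reference these */
		paramLI = EvaluateParams(entry, stmt->params, queryString, estate);
	}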
@@ -237,15 +237,15 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
 	/*
 	 * For CREATE TABLE ... AS EXECUTE, we must verify that the prepared
 	 * statement is one that produces tuples.  Currently we insist that it be
-	 * a plain old SELECT. In future we might consider supporting other
+	 * a plain old SELECT.  In future we might consider supporting other
 	 * things such as INSERT ... RETURNING, but there are a couple of issues
 	 * to be settled first, notably how WITH NO DATA should be handled in such
 	 * a case (do we really want to suppress execution?) and how to pass down
 	 * the OID-determining eflags (PortalStart won't handle them in such a
 	 * case, and for that matter it's not clear the executor will either).
 	 *
-	 * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the
-	 * proper eflags and fetch count are passed to PortalStart/PortalRun.
+	 * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the proper
+	 * eflags and fetch count are passed to PortalStart/PortalRun.
 	 */
 	if (intoClause)
 	{
@@ -658,7 +658,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
 	{
 		/*
 		 * Need an EState to evaluate parameters; must not delete it till end
-		 * of query, in case parameters are pass-by-reference. Note that the
+		 * of query, in case parameters are pass-by-reference.  Note that the
 		 * passed-in "params" could possibly be referenced in the parameter
 		 * expressions.
 		 */

View File

@@ -133,7 +133,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
 							 false, /* isAgg */
 							 false, /* isWindowFunc */
 							 false, /* security_definer */
-							 false, /* isLeakProof */
+							 false, /* isLeakProof */
 							 false, /* isStrict */
 							 PROVOLATILE_VOLATILE,
 							 buildoidvector(funcargtypes, 0),
@@ -210,7 +210,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
 							 false, /* isAgg */
 							 false, /* isWindowFunc */
 							 false, /* security_definer */
-							 false, /* isLeakProof */
+							 false, /* isLeakProof */
 							 true,	/* isStrict */
 							 PROVOLATILE_VOLATILE,
 							 buildoidvector(funcargtypes, 1),

View File

@@ -237,7 +237,7 @@ GetSecurityLabel(const ObjectAddress *object, const char *provider)
 	return seclabel;
 }
 
-/*
+/*
  * SetSharedSecurityLabel is a helper function of SetSecurityLabel to
  * handle shared database objects.
 */
@@ -246,8 +246,8 @@ SetSharedSecurityLabel(const ObjectAddress *object,
 					   const char *provider, const char *label)
 {
 	Relation	pg_shseclabel;
-	ScanKeyData	keys[4];
-	SysScanDesc	scan;
+	ScanKeyData keys[4];
+	SysScanDesc scan;
 	HeapTuple	oldtup;
 	HeapTuple	newtup = NULL;
 	Datum		values[Natts_pg_shseclabel];
@@ -414,8 +414,8 @@ void
 DeleteSharedSecurityLabel(Oid objectId, Oid classId)
 {
 	Relation	pg_shseclabel;
-	ScanKeyData	skey[2];
-	SysScanDesc	scan;
+	ScanKeyData skey[2];
+	SysScanDesc scan;
 	HeapTuple	oldtup;
 
 	ScanKeyInit(&skey[0],
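
The ScanKeyInit call that the hunk truncates sets up the two-key index scan over pg_shseclabel. A plausible completion, with the attribute numbers and index OID macro assumed from the catalog headers:

	ScanKeyInit(&skey[0],
				Anum_pg_shseclabel_objoid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(objectId));
	ScanKeyInit(&skey[1],
				Anum_pg_shseclabel_classoid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(classId));

	scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId,
							  true, SnapshotNow, 2, skey);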

Some files were not shown because too many files have changed in this diff.