pgindent run for 8.3.

Bruce Momjian 2007-11-15 21:14:46 +00:00
parent 3adc760fb9
commit fdf5a5efb7
486 changed files with 10044 additions and 9664 deletions
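pgindent is PostgreSQL's mechanical source-formatting tool (BSD indent plus project-specific pre- and post-processing), and every hunk in this commit is the output of one such run: spaces inserted after commas, variable declarations re-aligned into columns, comments re-wrapped to the project's line width, and stray whitespace normalized. As a rough sketch of the style it enforces (this function is invented for illustration and is not part of the commit):

#include <stdio.h>

/*
 * A minimal, hypothetical example of pgindent-style layout.  Before the
 * run, the body below might have been written as "int x=add_pair(1,2);"
 * on one cramped line; pgindent produces the spacing shown here.
 */
static int
add_pair(int a, int b)
{
	int			result;			/* declarations aligned in a column */

	result = a + b;				/* binary operators get surrounding spaces */
	return result;
}

int
main(void)
{
	printf("%d\n", add_pair(1, 2));		/* a space follows each comma */
	return 0;
}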

contrib/cube/cube.c

@@ -1,5 +1,5 @@
/******************************************************************************
$PostgreSQL: pgsql/contrib/cube/cube.c,v 1.33 2007/06/05 21:31:03 tgl Exp $
$PostgreSQL: pgsql/contrib/cube/cube.c,v 1.34 2007/11/15 21:14:29 momjian Exp $
This file contains routines that can be bound to a Postgres backend and
called by the backend in the process of processing queries. The calling
@@ -306,7 +306,7 @@ cube_subset(PG_FUNCTION_ARGS)
result->x[i + dim] = c->x[dx[i] + c->dim - 1];
}
PG_FREE_IF_COPY(c,0);
PG_FREE_IF_COPY(c, 0);
PG_RETURN_NDBOX(result);
}
@@ -360,7 +360,7 @@ cube_out(PG_FUNCTION_ARGS)
appendStringInfoChar(&buf, ')');
}
PG_FREE_IF_COPY(cube,0);
PG_FREE_IF_COPY(cube, 0);
PG_RETURN_CSTRING(buf.data);
}
@@ -381,20 +381,20 @@ g_cube_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
NDBOX *query = PG_GETARG_NDBOX(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
bool res;
bool res;
/*
* if entry is not leaf, use g_cube_internal_consistent, else use
* g_cube_leaf_consistent
*/
if (GIST_LEAF(entry))
res = g_cube_leaf_consistent( DatumGetNDBOX(entry->key),
query, strategy);
res = g_cube_leaf_consistent(DatumGetNDBOX(entry->key),
query, strategy);
else
res = g_cube_internal_consistent( DatumGetNDBOX(entry->key),
query, strategy);
res = g_cube_internal_consistent(DatumGetNDBOX(entry->key),
query, strategy);
PG_FREE_IF_COPY(query,1);
PG_FREE_IF_COPY(query, 1);
PG_RETURN_BOOL(res);
}
@@ -451,14 +451,15 @@ Datum
g_cube_decompress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
NDBOX *key = DatumGetNDBOX(PG_DETOAST_DATUM(entry->key));
NDBOX *key = DatumGetNDBOX(PG_DETOAST_DATUM(entry->key));
if (key != DatumGetNDBOX(entry->key))
{
GISTENTRY *retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(key),
entry->rel, entry->page,
entry->offset, FALSE);
entry->rel, entry->page,
entry->offset, FALSE);
PG_RETURN_POINTER(retval);
}
PG_RETURN_POINTER(entry);
@@ -479,8 +480,8 @@ g_cube_penalty(PG_FUNCTION_ARGS)
double tmp1,
tmp2;
ud = cube_union_v0( DatumGetNDBOX(origentry->key),
DatumGetNDBOX(newentry->key));
ud = cube_union_v0(DatumGetNDBOX(origentry->key),
DatumGetNDBOX(newentry->key));
rt_cube_size(ud, &tmp1);
rt_cube_size(DatumGetNDBOX(origentry->key), &tmp2);
*result = (float) (tmp1 - tmp2);
@@ -812,12 +813,12 @@ cube_union(PG_FUNCTION_ARGS)
{
NDBOX *a = PG_GETARG_NDBOX(0),
*b = PG_GETARG_NDBOX(1);
NDBOX *res;
NDBOX *res;
res = cube_union_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_NDBOX(res);
}
@@ -876,8 +877,9 @@ cube_inter(PG_FUNCTION_ARGS)
a->x[i + a->dim]), result->x[i + a->dim]);
}
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
/*
* Is it OK to return a non-null intersection for non-overlapping boxes?
*/
@@ -899,7 +901,7 @@ cube_size(PG_FUNCTION_ARGS)
for (i = 0, j = a->dim; i < a->dim; i++, j++)
result = result * Abs((a->x[j] - a->x[i]));
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(a, 0);
PG_RETURN_FLOAT8(result);
}
@@ -1011,8 +1013,8 @@ cube_cmp(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_INT32(res);
}
@@ -1026,8 +1028,8 @@ cube_eq(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res == 0);
}
@@ -1041,8 +1043,8 @@ cube_ne(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res != 0);
}
@@ -1056,8 +1058,8 @@ cube_lt(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res < 0);
}
@@ -1071,8 +1073,8 @@ cube_gt(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res > 0);
}
@@ -1086,8 +1088,8 @@ cube_le(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res <= 0);
}
@@ -1101,8 +1103,8 @@ cube_ge(PG_FUNCTION_ARGS)
res = cube_cmp_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res >= 0);
}
@@ -1157,8 +1159,8 @@ cube_contains(PG_FUNCTION_ARGS)
res = cube_contains_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res);
}
@@ -1173,8 +1175,8 @@ cube_contained(PG_FUNCTION_ARGS)
res = cube_contains_v0(b, a);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res);
}
@@ -1234,8 +1236,8 @@ cube_overlap(PG_FUNCTION_ARGS)
res = cube_overlap_v0(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_BOOL(res);
}
@@ -1281,8 +1283,8 @@ cube_distance(PG_FUNCTION_ARGS)
distance += d * d;
}
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_FLOAT8(sqrt(distance));
}
@@ -1317,7 +1319,7 @@ cube_is_point(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(FALSE);
}
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(a, 0);
PG_RETURN_BOOL(TRUE);
}
@@ -1331,7 +1333,7 @@ cube_dim(PG_FUNCTION_ARGS)
c = PG_GETARG_NDBOX(0);
dim = c->dim;
PG_FREE_IF_COPY(c,0);
PG_FREE_IF_COPY(c, 0);
PG_RETURN_INT32(c->dim);
}
@@ -1350,7 +1352,7 @@ cube_ll_coord(PG_FUNCTION_ARGS)
if (c->dim >= n && n > 0)
result = Min(c->x[n - 1], c->x[c->dim + n - 1]);
PG_FREE_IF_COPY(c,0);
PG_FREE_IF_COPY(c, 0);
PG_RETURN_FLOAT8(result);
}
@@ -1369,7 +1371,7 @@ cube_ur_coord(PG_FUNCTION_ARGS)
if (c->dim >= n && n > 0)
result = Max(c->x[n - 1], c->x[c->dim + n - 1]);
PG_FREE_IF_COPY(c,0);
PG_FREE_IF_COPY(c, 0);
PG_RETURN_FLOAT8(result);
}
@@ -1384,7 +1386,7 @@ cube_enlarge(PG_FUNCTION_ARGS)
j,
k;
NDBOX *a;
double r;
double r;
int4 n;
a = PG_GETARG_NDBOX(0);
@@ -1426,7 +1428,7 @@ cube_enlarge(PG_FUNCTION_ARGS)
result->x[j] = r;
}
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(a, 0);
PG_RETURN_NDBOX(result);
}
@@ -1490,7 +1492,7 @@ cube_c_f8(PG_FUNCTION_ARGS)
result->x[result->dim - 1] = x;
result->x[2 * result->dim - 1] = x;
PG_FREE_IF_COPY(c,0);
PG_FREE_IF_COPY(c, 0);
PG_RETURN_NDBOX(result);
}
@@ -1521,6 +1523,6 @@ cube_c_f8_f8(PG_FUNCTION_ARGS)
result->x[result->dim - 1] = x1;
result->x[2 * result->dim - 1] = x2;
PG_FREE_IF_COPY(c,0);
PG_FREE_IF_COPY(c, 0);
PG_RETURN_NDBOX(result);
}

contrib/dblink/dblink.c

@@ -8,7 +8,7 @@
* Darko Prenosil <Darko.Prenosil@finteh.hr>
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
*
* $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.65 2007/08/27 01:24:50 tgl Exp $
* $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.66 2007/11/15 21:14:29 momjian Exp $
* Copyright (c) 2001-2007, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
@@ -256,10 +256,10 @@ dblink_connect(PG_FUNCTION_ARGS)
pfree(rconn);
ereport(ERROR,
(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
errmsg("password is required"),
errdetail("Non-superuser cannot connect if the server does not request a password."),
errhint("Target server's authentication method must be changed.")));
(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
errmsg("password is required"),
errdetail("Non-superuser cannot connect if the server does not request a password."),
errhint("Target server's authentication method must be changed.")));
}
}

contrib/dict_int/dict_int.c

@@ -6,7 +6,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/contrib/dict_int/dict_int.c,v 1.1 2007/10/15 21:36:50 tgl Exp $
* $PostgreSQL: pgsql/contrib/dict_int/dict_int.c,v 1.2 2007/11/15 21:14:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,24 +19,25 @@
PG_MODULE_MAGIC;
typedef struct {
int maxlen;
bool rejectlong;
} DictInt;
typedef struct
{
int maxlen;
bool rejectlong;
} DictInt;
PG_FUNCTION_INFO_V1(dintdict_init);
Datum dintdict_init(PG_FUNCTION_ARGS);
Datum dintdict_init(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(dintdict_lexize);
Datum dintdict_lexize(PG_FUNCTION_ARGS);
Datum dintdict_lexize(PG_FUNCTION_ARGS);
Datum
dintdict_init(PG_FUNCTION_ARGS)
{
List *dictoptions = (List *) PG_GETARG_POINTER(0);
DictInt *d;
ListCell *l;
List *dictoptions = (List *) PG_GETARG_POINTER(0);
DictInt *d;
ListCell *l;
d = (DictInt *) palloc0(sizeof(DictInt));
d->maxlen = 6;
@@ -44,7 +45,7 @@ dintdict_init(PG_FUNCTION_ARGS)
foreach(l, dictoptions)
{
DefElem *defel = (DefElem *) lfirst(l);
DefElem *defel = (DefElem *) lfirst(l);
if (pg_strcasecmp(defel->defname, "MAXLEN") == 0)
{
@@ -62,22 +63,22 @@ dintdict_init(PG_FUNCTION_ARGS)
defel->defname)));
}
}
PG_RETURN_POINTER(d);
}
Datum
dintdict_lexize(PG_FUNCTION_ARGS)
{
DictInt *d = (DictInt*)PG_GETARG_POINTER(0);
char *in = (char*)PG_GETARG_POINTER(1);
char *txt = pnstrdup(in, PG_GETARG_INT32(2));
TSLexeme *res=palloc(sizeof(TSLexeme)*2);
DictInt *d = (DictInt *) PG_GETARG_POINTER(0);
char *in = (char *) PG_GETARG_POINTER(1);
char *txt = pnstrdup(in, PG_GETARG_INT32(2));
TSLexeme *res = palloc(sizeof(TSLexeme) * 2);
res[1].lexeme = NULL;
if (PG_GETARG_INT32(2) > d->maxlen)
if (PG_GETARG_INT32(2) > d->maxlen)
{
if ( d->rejectlong )
if (d->rejectlong)
{
/* reject by returning void array */
pfree(txt);

contrib/dict_xsyn/dict_xsyn.c

@@ -6,7 +6,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.1 2007/10/15 21:36:50 tgl Exp $
* $PostgreSQL: pgsql/contrib/dict_xsyn/dict_xsyn.c,v 1.2 2007/11/15 21:14:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,29 +24,30 @@ PG_MODULE_MAGIC;
typedef struct
{
char *key; /* Word */
char *value; /* Unparsed list of synonyms, including the word itself */
char *key; /* Word */
char *value; /* Unparsed list of synonyms, including the
* word itself */
} Syn;
typedef struct
{
int len;
Syn *syn;
int len;
Syn *syn;
bool keeporig;
bool keeporig;
} DictSyn;
PG_FUNCTION_INFO_V1(dxsyn_init);
Datum dxsyn_init(PG_FUNCTION_ARGS);
Datum dxsyn_init(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(dxsyn_lexize);
Datum dxsyn_lexize(PG_FUNCTION_ARGS);
Datum dxsyn_lexize(PG_FUNCTION_ARGS);
static char *
find_word(char *in, char **end)
{
char *start;
char *start;
*end = NULL;
while (*in && t_isspace(in))
@@ -71,12 +72,12 @@ compare_syn(const void *a, const void *b)
}
static void
read_dictionary(DictSyn *d, char *filename)
read_dictionary(DictSyn * d, char *filename)
{
char *real_filename = get_tsearch_config_filename(filename, "rules");
FILE *fin;
char *line;
int cur = 0;
char *real_filename = get_tsearch_config_filename(filename, "rules");
FILE *fin;
char *line;
int cur = 0;
if ((fin = AllocateFile(real_filename, "r")) == NULL)
ereport(ERROR,
@@ -86,9 +87,9 @@ read_dictionary(DictSyn *d, char *filename)
while ((line = t_readline(fin)) != NULL)
{
char *value;
char *key;
char *end = NULL;
char *value;
char *key;
char *end = NULL;
if (*line == '\0')
continue;
@@ -130,9 +131,9 @@ read_dictionary(DictSyn *d, char *filename)
Datum
dxsyn_init(PG_FUNCTION_ARGS)
{
List *dictoptions = (List *) PG_GETARG_POINTER(0);
DictSyn *d;
ListCell *l;
List *dictoptions = (List *) PG_GETARG_POINTER(0);
DictSyn *d;
ListCell *l;
d = (DictSyn *) palloc0(sizeof(DictSyn));
d->len = 0;
@@ -141,7 +142,7 @@ dxsyn_init(PG_FUNCTION_ARGS)
foreach(l, dictoptions)
{
DefElem *defel = (DefElem *) lfirst(l);
DefElem *defel = (DefElem *) lfirst(l);
if (pg_strcasecmp(defel->defname, "KEEPORIG") == 0)
{
@@ -166,19 +167,19 @@ dxsyn_init(PG_FUNCTION_ARGS)
Datum
dxsyn_lexize(PG_FUNCTION_ARGS)
{
DictSyn *d = (DictSyn *) PG_GETARG_POINTER(0);
char *in = (char *) PG_GETARG_POINTER(1);
int length = PG_GETARG_INT32(2);
Syn word;
Syn *found;
TSLexeme *res = NULL;
DictSyn *d = (DictSyn *) PG_GETARG_POINTER(0);
char *in = (char *) PG_GETARG_POINTER(1);
int length = PG_GETARG_INT32(2);
Syn word;
Syn *found;
TSLexeme *res = NULL;
if (!length || d->len == 0)
PG_RETURN_POINTER(NULL);
/* Create search pattern */
{
char *temp = pnstrdup(in, length);
char *temp = pnstrdup(in, length);
word.key = lowerstr(temp);
pfree(temp);
@@ -186,7 +187,7 @@ dxsyn_lexize(PG_FUNCTION_ARGS)
}
/* Look for matching syn */
found = (Syn *)bsearch(&word, d->syn, d->len, sizeof(Syn), compare_syn);
found = (Syn *) bsearch(&word, d->syn, d->len, sizeof(Syn), compare_syn);
pfree(word.key);
if (!found)
@@ -194,28 +195,28 @@ dxsyn_lexize(PG_FUNCTION_ARGS)
/* Parse string of synonyms and return array of words */
{
char *value = pstrdup(found->value);
int value_length = strlen(value);
char *pos = value;
int nsyns = 0;
bool is_first = true;
char *value = pstrdup(found->value);
int value_length = strlen(value);
char *pos = value;
int nsyns = 0;
bool is_first = true;
res = palloc(0);
while(pos < value + value_length)
while (pos < value + value_length)
{
char *end;
char *syn = find_word(pos, &end);
char *end;
char *syn = find_word(pos, &end);
if (!syn)
break;
*end = '\0';
res = repalloc(res, sizeof(TSLexeme)*(nsyns + 2));
res = repalloc(res, sizeof(TSLexeme) * (nsyns + 2));
res[nsyns].lexeme = NULL;
/* first word is added to result only if KEEPORIG flag is set */
if(d->keeporig || !is_first)
if (d->keeporig || !is_first)
{
res[nsyns].lexeme = pstrdup(syn);
res[nsyns + 1].lexeme = NULL;

contrib/hstore/hstore.h

@@ -50,7 +50,7 @@ typedef struct
int comparePairs(const void *a, const void *b);
int uniquePairs(Pairs * a, int4 l, int4 *buflen);
#define HStoreContainsStrategyNumber 7
#define HStoreExistsStrategyNumber 9
#define HStoreContainsStrategyNumber 7
#define HStoreExistsStrategyNumber 9
#endif

contrib/hstore/hstore_gin.c

@@ -1,24 +1,24 @@
#include "hstore.h"
#include "access/gin.h"
#include "access/gin.h"
#define KEYFLAG 'K'
#define VALFLAG 'V'
#define NULLFLAG 'N'
#define KEYFLAG 'K'
#define VALFLAG 'V'
#define NULLFLAG 'N'
PG_FUNCTION_INFO_V1(gin_extract_hstore);
Datum gin_extract_hstore(PG_FUNCTION_ARGS);
Datum gin_extract_hstore(PG_FUNCTION_ARGS);
static text*
makeitem( char *str, int len )
static text *
makeitem(char *str, int len)
{
text *item;
text *item;
item = (text*)palloc( VARHDRSZ + len + 1 );
item = (text *) palloc(VARHDRSZ + len + 1);
SET_VARSIZE(item, VARHDRSZ + len + 1);
if ( str && len > 0 )
memcpy( VARDATA(item)+1, str, len );
if (str && len > 0)
memcpy(VARDATA(item) + 1, str, len);
return item;
}
@@ -26,37 +26,37 @@ makeitem( char *str, int len )
Datum
gin_extract_hstore(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
HStore *hs = PG_GETARG_HS(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
*nentries = 2*hs->size;
*nentries = 2 * hs->size;
if ( hs->size > 0 )
if (hs->size > 0)
{
HEntry *ptr = ARRPTR(hs);
char *words = STRPTR(hs);
int i=0;
HEntry *ptr = ARRPTR(hs);
char *words = STRPTR(hs);
int i = 0;
entries = (Datum*)palloc( sizeof(Datum) * 2 * hs->size );
entries = (Datum *) palloc(sizeof(Datum) * 2 * hs->size);
while (ptr - ARRPTR(hs) < hs->size)
{
text *item;
text *item;
item = makeitem( words + ptr->pos, ptr->keylen );
item = makeitem(words + ptr->pos, ptr->keylen);
*VARDATA(item) = KEYFLAG;
entries[i++] = PointerGetDatum(item);
if ( ptr->valisnull )
if (ptr->valisnull)
{
item = makeitem( NULL, 0 );
item = makeitem(NULL, 0);
*VARDATA(item) = NULLFLAG;
}
else
{
item = makeitem( words + ptr->pos + ptr->keylen, ptr->vallen );
item = makeitem(words + ptr->pos + ptr->keylen, ptr->vallen);
*VARDATA(item) = VALFLAG;
}
entries[i++] = PointerGetDatum(item);
@@ -65,36 +65,37 @@ gin_extract_hstore(PG_FUNCTION_ARGS)
}
}
PG_FREE_IF_COPY(hs,0);
PG_FREE_IF_COPY(hs, 0);
PG_RETURN_POINTER(entries);
}
PG_FUNCTION_INFO_V1(gin_extract_hstore_query);
Datum gin_extract_hstore_query(PG_FUNCTION_ARGS);
Datum gin_extract_hstore_query(PG_FUNCTION_ARGS);
Datum
gin_extract_hstore_query(PG_FUNCTION_ARGS)
{
StrategyNumber strategy = PG_GETARG_UINT16(2);
if ( strategy == HStoreContainsStrategyNumber )
if (strategy == HStoreContainsStrategyNumber)
{
PG_RETURN_DATUM( DirectFunctionCall2(
gin_extract_hstore,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)
));
PG_RETURN_DATUM(DirectFunctionCall2(
gin_extract_hstore,
PG_GETARG_DATUM(0),
PG_GETARG_DATUM(1)
));
}
else if ( strategy == HStoreExistsStrategyNumber )
else if (strategy == HStoreExistsStrategyNumber)
{
text *item, *q = PG_GETARG_TEXT_P(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
text *item,
*q = PG_GETARG_TEXT_P(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
*nentries = 1;
entries = (Datum*)palloc( sizeof(Datum) );
entries = (Datum *) palloc(sizeof(Datum));
item = makeitem( VARDATA(q), VARSIZE(q)-VARHDRSZ );
item = makeitem(VARDATA(q), VARSIZE(q) - VARHDRSZ);
*VARDATA(item) = KEYFLAG;
entries[0] = PointerGetDatum(item);
@@ -107,29 +108,28 @@ gin_extract_hstore_query(PG_FUNCTION_ARGS)
}
PG_FUNCTION_INFO_V1(gin_consistent_hstore);
Datum gin_consistent_hstore(PG_FUNCTION_ARGS);
Datum gin_consistent_hstore(PG_FUNCTION_ARGS);
Datum
gin_consistent_hstore(PG_FUNCTION_ARGS)
{
StrategyNumber strategy = PG_GETARG_UINT16(1);
bool res = true;
bool res = true;
if ( strategy == HStoreContainsStrategyNumber )
if (strategy == HStoreContainsStrategyNumber)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
HStore *query = PG_GETARG_HS(2);
int i;
bool *check = (bool *) PG_GETARG_POINTER(0);
HStore *query = PG_GETARG_HS(2);
int i;
for(i=0;res && i<2*query->size;i++)
if ( check[i] == false )
for (i = 0; res && i < 2 * query->size; i++)
if (check[i] == false)
res = false;
}
else if ( strategy == HStoreExistsStrategyNumber )
else if (strategy == HStoreExistsStrategyNumber)
res = true;
else
elog(ERROR, "Unsupported strategy number: %d", strategy);
PG_RETURN_BOOL(res);
}

contrib/hstore/hstore_op.c

@@ -275,13 +275,13 @@ tconvert(PG_FUNCTION_ARGS)
int len;
HStore *out;
if ( PG_ARGISNULL(0) )
if (PG_ARGISNULL(0))
PG_RETURN_NULL();
key = PG_GETARG_TEXT_P(0);
if ( PG_ARGISNULL(1) )
len = CALCDATASIZE(1, VARSIZE(key) );
if (PG_ARGISNULL(1))
len = CALCDATASIZE(1, VARSIZE(key));
else
{
val = PG_GETARG_TEXT_P(1);
@@ -292,7 +292,7 @@ tconvert(PG_FUNCTION_ARGS)
out->size = 1;
ARRPTR(out)->keylen = VARSIZE(key) - VARHDRSZ;
if ( PG_ARGISNULL(1) )
if (PG_ARGISNULL(1))
{
ARRPTR(out)->vallen = 0;
ARRPTR(out)->valisnull = true;
@@ -537,18 +537,18 @@ hs_contains(PG_FUNCTION_ARGS)
if (entry)
{
if ( te->valisnull || entry->valisnull )
if (te->valisnull || entry->valisnull)
{
if ( !(te->valisnull && entry->valisnull) )
if (!(te->valisnull && entry->valisnull))
res = false;
}
else if ( te->vallen != entry->vallen ||
strncmp(
vv + entry->pos + entry->keylen,
tv + te->pos + te->keylen,
te->vallen)
)
res = false;
else if (te->vallen != entry->vallen ||
strncmp(
vv + entry->pos + entry->keylen,
tv + te->pos + te->keylen,
te->vallen)
)
res = false;
}
else
res = false;

contrib/intarray/_int_gin.c

@@ -57,16 +57,17 @@ ginint4_queryextract(PG_FUNCTION_ARGS)
}
}
if ( nentries == 0 )
if (nentries == 0)
{
switch( strategy )
switch (strategy)
{
case BooleanSearchStrategy:
case RTOverlapStrategyNumber:
*nentries = -1; /* nobody can be found */
break;
default: /* require fullscan: GIN can't find void arrays */
break;
*nentries = -1; /* nobody can be found */
break;
default: /* require fullscan: GIN can't find void
* arrays */
break;
}
}

contrib/intarray/_int_gist.c

@@ -233,10 +233,11 @@ g_int_decompress(PG_FUNCTION_ARGS)
CHECKARRVALID(in);
if (ARRISVOID(in))
{
if (in != (ArrayType *) DatumGetPointer(entry->key)) {
if (in != (ArrayType *) DatumGetPointer(entry->key))
{
retval = palloc(sizeof(GISTENTRY));
gistentryinit(*retval, PointerGetDatum(in),
entry->rel, entry->page, entry->offset, FALSE);
entry->rel, entry->page, entry->offset, FALSE);
PG_RETURN_POINTER(retval);
}

contrib/isn/isn.c

@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.6 2007/06/05 21:31:03 tgl Exp $
* $PostgreSQL: pgsql/contrib/isn/isn.c,v 1.7 2007/11/15 21:14:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -506,7 +506,7 @@ ean2UPC(char *isn)
* Returns the ean13 value of the string.
*/
static
ean13
ean13
str2ean(const char *num)
{
ean13 ean = 0; /* current ean */

contrib/pageinspect/btreefuncs.c

@@ -302,9 +302,9 @@ bt_page_items(PG_FUNCTION_ARGS)
buffer = ReadBuffer(rel, blkno);
/*
* We copy the page into local storage to avoid holding pin on
* the buffer longer than we must, and possibly failing to
* release it at all if the calling query doesn't fetch all rows.
* We copy the page into local storage to avoid holding pin on the
* buffer longer than we must, and possibly failing to release it at
* all if the calling query doesn't fetch all rows.
*/
mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);

contrib/pageinspect/heapfuncs.c

@@ -8,17 +8,17 @@
* information as possible, even if it's nonsense. That's because if a
* page is corrupt, we don't know why and how exactly it is corrupt, so we
* let the user to judge it.
*
*
* These functions are restricted to superusers for the fear of introducing
* security holes if the input checking isn't as water-tight as it should.
* You'd need to be superuser to obtain a raw page image anyway, so
* security holes if the input checking isn't as water-tight as it should.
* You'd need to be superuser to obtain a raw page image anyway, so
* there's hardly any use case for using these without superuser-rights
* anyway.
*
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/contrib/pageinspect/heapfuncs.c,v 1.2 2007/09/12 22:10:25 tgl Exp $
* $PostgreSQL: pgsql/contrib/pageinspect/heapfuncs.c,v 1.3 2007/11/15 21:14:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,10 +34,10 @@
#include "utils/builtins.h"
#include "miscadmin.h"
Datum heap_page_items(PG_FUNCTION_ARGS);
Datum heap_page_items(PG_FUNCTION_ARGS);
#define GET_TEXT(str_) \
DirectFunctionCall1(textin, CStringGetDatum(str_))
DirectFunctionCall1(textin, CStringGetDatum(str_))
/*
* bits_to_text
@@ -48,12 +48,12 @@ Datum heap_page_items(PG_FUNCTION_ARGS);
static char *
bits_to_text(bits8 *bits, int len)
{
int i;
char *str;
int i;
char *str;
str = palloc(len + 1);
for(i = 0; i < len; i++)
for (i = 0; i < len; i++)
str[i] = (bits[(i / 8)] & (1 << (i % 8))) ? '1' : '0';
str[i] = '\0';
@@ -74,15 +74,15 @@ typedef struct heap_page_items_state
TupleDesc tupd;
Page page;
uint16 offset;
} heap_page_items_state;
} heap_page_items_state;
Datum
heap_page_items(PG_FUNCTION_ARGS)
{
bytea *raw_page = PG_GETARG_BYTEA_P(0);
bytea *raw_page = PG_GETARG_BYTEA_P(0);
heap_page_items_state *inter_call_data = NULL;
FuncCallContext *fctx;
int raw_page_size;
int raw_page_size;
if (!superuser())
ereport(ERROR,
@@ -96,10 +96,10 @@ heap_page_items(PG_FUNCTION_ARGS)
TupleDesc tupdesc;
MemoryContext mctx;
if(raw_page_size < SizeOfPageHeaderData)
ereport(ERROR,
if (raw_page_size < SizeOfPageHeaderData)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("input page too small (%d bytes)", raw_page_size)));
errmsg("input page too small (%d bytes)", raw_page_size)));
fctx = SRF_FIRSTCALL_INIT();
mctx = MemoryContextSwitchTo(fctx->multi_call_memory_ctx);
@@ -132,42 +132,42 @@ heap_page_items(PG_FUNCTION_ARGS)
ItemId id;
Datum values[13];
bool nulls[13];
uint16 lp_offset;
uint16 lp_offset;
uint16 lp_flags;
uint16 lp_len;
memset(nulls, 0, sizeof(nulls));
/* Extract information from the line pointer */
id = PageGetItemId(page, inter_call_data->offset);
lp_offset = ItemIdGetOffset(id);
lp_flags = ItemIdGetFlags(id);
lp_len = ItemIdGetLength(id);
lp_offset = ItemIdGetOffset(id);
lp_flags = ItemIdGetFlags(id);
lp_len = ItemIdGetLength(id);
values[0] = UInt16GetDatum(inter_call_data->offset);
values[1] = UInt16GetDatum(lp_offset);
values[2] = UInt16GetDatum(lp_flags);
values[3] = UInt16GetDatum(lp_len);
/* We do just enough validity checking to make sure we don't
* reference data outside the page passed to us. The page
* could be corrupt in many other ways, but at least we won't
* crash.
/*
* We do just enough validity checking to make sure we don't reference
* data outside the page passed to us. The page could be corrupt in
* many other ways, but at least we won't crash.
*/
if (ItemIdHasStorage(id) &&
lp_len >= sizeof(HeapTupleHeader) &&
lp_offset == MAXALIGN(lp_offset) &&
lp_offset + lp_len <= raw_page_size)
{
HeapTupleHeader tuphdr;
int bits_len;
HeapTupleHeader tuphdr;
int bits_len;
/* Extract information from the tuple header */
tuphdr = (HeapTupleHeader) PageGetItem(page, id);
values[4] = UInt32GetDatum(HeapTupleHeaderGetXmin(tuphdr));
values[5] = UInt32GetDatum(HeapTupleHeaderGetXmax(tuphdr));
values[6] = UInt32GetDatum(HeapTupleHeaderGetRawCommandId(tuphdr)); /* shared with xvac */
@@ -176,22 +176,23 @@ heap_page_items(PG_FUNCTION_ARGS)
values[9] = UInt16GetDatum(tuphdr->t_infomask);
values[10] = UInt8GetDatum(tuphdr->t_hoff);
/* We already checked that the item as is completely within
* the raw page passed to us, with the length given in the line
/*
* We already checked that the item as is completely within the
* raw page passed to us, with the length given in the line
* pointer.. Let's check that t_hoff doesn't point over lp_len,
* before using it to access t_bits and oid.
*/
if (tuphdr->t_hoff >= sizeof(HeapTupleHeader) &&
if (tuphdr->t_hoff >= sizeof(HeapTupleHeader) &&
tuphdr->t_hoff <= lp_len)
{
if (tuphdr->t_infomask & HEAP_HASNULL)
{
bits_len = tuphdr->t_hoff -
(((char *)tuphdr->t_bits) - ((char *)tuphdr));
bits_len = tuphdr->t_hoff -
(((char *) tuphdr->t_bits) -((char *) tuphdr));
values[11] = GET_TEXT(
bits_to_text(tuphdr->t_bits, bits_len * 8));
}
bits_to_text(tuphdr->t_bits, bits_len * 8));
}
else
nulls[11] = true;
@@ -208,17 +209,19 @@ heap_page_items(PG_FUNCTION_ARGS)
}
else
{
/* The line pointer is not used, or it's invalid. Set the rest of
* the fields to NULL */
int i;
/*
* The line pointer is not used, or it's invalid. Set the rest of
* the fields to NULL
*/
int i;
for(i = 4; i <= 12; i++)
for (i = 4; i <= 12; i++)
nulls[i] = true;
}
/* Build and return the result tuple. */
resultTuple = heap_form_tuple(inter_call_data->tupd, values, nulls);
result = HeapTupleGetDatum(resultTuple);
/* Build and return the result tuple. */
resultTuple = heap_form_tuple(inter_call_data->tupd, values, nulls);
result = HeapTupleGetDatum(resultTuple);
inter_call_data->offset++;

contrib/pageinspect/rawpage.c

@@ -8,7 +8,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/contrib/pageinspect/rawpage.c,v 1.2 2007/09/21 21:25:42 tgl Exp $
* $PostgreSQL: pgsql/contrib/pageinspect/rawpage.c,v 1.3 2007/11/15 21:14:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -26,8 +26,8 @@
PG_MODULE_MAGIC;
Datum get_raw_page(PG_FUNCTION_ARGS);
Datum page_header(PG_FUNCTION_ARGS);
Datum get_raw_page(PG_FUNCTION_ARGS);
Datum page_header(PG_FUNCTION_ARGS);
/*
* get_raw_page
@@ -43,9 +43,9 @@ get_raw_page(PG_FUNCTION_ARGS)
uint32 blkno = PG_GETARG_UINT32(1);
Relation rel;
RangeVar *relrv;
bytea *raw_page;
char *raw_page_data;
RangeVar *relrv;
bytea *raw_page;
char *raw_page_data;
Buffer buf;
if (!superuser())
@@ -61,12 +61,12 @@ get_raw_page(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot get raw page from view \"%s\"",
RelationGetRelationName(rel))));
RelationGetRelationName(rel))));
if (rel->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot get raw page from composite type \"%s\"",
RelationGetRelationName(rel))));
RelationGetRelationName(rel))));
if (blkno >= RelationGetNumberOfBlocks(rel))
elog(ERROR, "block number %u is out of range for relation \"%s\"",
@@ -125,13 +125,13 @@ page_header(PG_FUNCTION_ARGS)
raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
/*
* Check that enough data was supplied, so that we don't try to access
* fields outside the supplied buffer.
* Check that enough data was supplied, so that we don't try to access
* fields outside the supplied buffer.
*/
if(raw_page_size < sizeof(PageHeaderData))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("input page too small (%d bytes)", raw_page_size)));
if (raw_page_size < sizeof(PageHeaderData))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("input page too small (%d bytes)", raw_page_size)));
page = (PageHeader) VARDATA(raw_page);
@@ -154,12 +154,12 @@ page_header(PG_FUNCTION_ARGS)
values[7] = UInt16GetDatum(PageGetPageLayoutVersion(page));
values[8] = TransactionIdGetDatum(page->pd_prune_xid);
/* Build and return the tuple. */
/* Build and return the tuple. */
memset(nulls, 0, sizeof(nulls));
tuple = heap_form_tuple(tupdesc, values, nulls);
result = HeapTupleGetDatum(tuple);
tuple = heap_form_tuple(tupdesc, values, nulls);
result = HeapTupleGetDatum(tuple);
PG_RETURN_DATUM(result);
}

contrib/pg_buffercache/pg_buffercache_pages.c

@@ -3,7 +3,7 @@
* pg_buffercache_pages.c
* display some contents of the buffer cache
*
* $PostgreSQL: pgsql/contrib/pg_buffercache/pg_buffercache_pages.c,v 1.13 2007/07/16 21:20:36 tgl Exp $
* $PostgreSQL: pgsql/contrib/pg_buffercache/pg_buffercache_pages.c,v 1.14 2007/11/15 21:14:30 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -149,9 +149,9 @@ pg_buffercache_pages(PG_FUNCTION_ARGS)
/*
* And release locks. We do this in reverse order for two reasons:
* (1) Anyone else who needs more than one of the locks will be trying
* to lock them in increasing order; we don't want to release the other
* process until it can get all the locks it needs.
* (2) This avoids O(N^2) behavior inside LWLockRelease.
* to lock them in increasing order; we don't want to release the
* other process until it can get all the locks it needs. (2) This
* avoids O(N^2) behavior inside LWLockRelease.
*/
for (i = NUM_BUFFER_PARTITIONS; --i >= 0;)
LWLockRelease(FirstBufMappingLock + i);

contrib/pg_standby/pg_standby.c

@@ -1,12 +1,12 @@
/*
* pg_standby.c
*
*
* Production-ready example of how to create a Warm Standby
* database server using continuous archiving as a
* database server using continuous archiving as a
* replication mechanism
*
* We separate the parameters for archive and nextWALfile
* so that we can check the archive exists, even if the
* so that we can check the archive exists, even if the
* WAL file doesn't (yet).
*
* This program will be executed once in full for each file
@@ -14,9 +14,9 @@
*
* It is designed to cater to a variety of needs, as well
* providing a customizable section.
*
* Original author: Simon Riggs simon@2ndquadrant.com
* Current maintainer: Simon Riggs
*
* Original author: Simon Riggs simon@2ndquadrant.com
* Current maintainer: Simon Riggs
*/
#include "postgres_fe.h"
@@ -26,7 +26,7 @@
#include <signal.h>
#ifdef WIN32
int getopt(int argc, char * const argv[], const char *optstring);
int getopt(int argc, char *const argv[], const char *optstring);
#else
#include <sys/time.h>
#include <unistd.h>
@@ -34,42 +34,44 @@ int getopt(int argc, char * const argv[], const char *optstring);
#ifdef HAVE_GETOPT_H
#include <getopt.h>
#endif
#endif /* ! WIN32 */
extern char *optarg;
extern int optind;
/* Options and defaults */
int sleeptime = 5; /* amount of time to sleep between file checks */
int waittime = -1; /* how long we have been waiting, -1 no wait yet */
int maxwaittime = 0; /* how long are we prepared to wait for? */
int keepfiles = 0; /* number of WAL files to keep, 0 keep all */
int maxretries = 3; /* number of retries on restore command */
bool debug = false; /* are we debugging? */
bool triggered = false; /* have we been triggered? */
bool need_cleanup = false; /* do we need to remove files from archive? */
int sleeptime = 5; /* amount of time to sleep between file checks */
int waittime = -1; /* how long we have been waiting, -1 no wait
* yet */
int maxwaittime = 0; /* how long are we prepared to wait for? */
int keepfiles = 0; /* number of WAL files to keep, 0 keep all */
int maxretries = 3; /* number of retries on restore command */
bool debug = false; /* are we debugging? */
bool triggered = false; /* have we been triggered? */
bool need_cleanup = false; /* do we need to remove files from
* archive? */
static volatile sig_atomic_t signaled = false;
char *archiveLocation; /* where to find the archive? */
char *triggerPath; /* where to find the trigger file? */
char *xlogFilePath; /* where we are going to restore to */
char *nextWALFileName; /* the file we need to get from archive */
char *restartWALFileName; /* the file from which we can restart restore */
char *priorWALFileName; /* the file we need to get from archive */
char WALFilePath[MAXPGPATH];/* the file path including archive */
char restoreCommand[MAXPGPATH]; /* run this to restore */
char exclusiveCleanupFileName[MAXPGPATH]; /* the file we need to get from archive */
char *archiveLocation; /* where to find the archive? */
char *triggerPath; /* where to find the trigger file? */
char *xlogFilePath; /* where we are going to restore to */
char *nextWALFileName; /* the file we need to get from archive */
char *restartWALFileName; /* the file from which we can restart restore */
char *priorWALFileName; /* the file we need to get from archive */
char WALFilePath[MAXPGPATH]; /* the file path including archive */
char restoreCommand[MAXPGPATH]; /* run this to restore */
char exclusiveCleanupFileName[MAXPGPATH]; /* the file we need to
* get from archive */
#define RESTORE_COMMAND_COPY 0
#define RESTORE_COMMAND_LINK 1
int restoreCommandType;
int restoreCommandType;
#define XLOG_DATA 0
#define XLOG_HISTORY 1
#define XLOG_BACKUP_LABEL 2
int nextWALFileType;
int nextWALFileType;
#define SET_RESTORE_COMMAND(cmd, arg1, arg2) \
snprintf(restoreCommand, MAXPGPATH, cmd " \"%s\" \"%s\"", arg1, arg2)
@@ -86,21 +88,21 @@ struct stat stat_buf;
* accessible directory. If you want to make other assumptions,
* such as using a vendor-specific archive and access API, these
* routines are the ones you'll need to change. You're
* enouraged to submit any changes to pgsql-patches@postgresql.org
* or personally to the current maintainer. Those changes may be
* enouraged to submit any changes to pgsql-patches@postgresql.org
* or personally to the current maintainer. Those changes may be
* folded in to later versions of this program.
*/
#define XLOG_DATA_FNAME_LEN 24
#define XLOG_DATA_FNAME_LEN 24
/* Reworked from access/xlog_internal.h */
#define XLogFileName(fname, tli, log, seg) \
snprintf(fname, XLOG_DATA_FNAME_LEN + 1, "%08X%08X%08X", tli, log, seg)
/*
* Initialize allows customized commands into the warm standby program.
* Initialize allows customized commands into the warm standby program.
*
* As an example, and probably the common case, we use either
* cp/ln commands on *nix, or copy/move command on Windows.
* As an example, and probably the common case, we use either
* cp/ln commands on *nix, or copy/move command on Windows.
*
*/
static void
@@ -111,79 +113,79 @@ CustomizableInitialize(void)
switch (restoreCommandType)
{
case RESTORE_COMMAND_LINK:
SET_RESTORE_COMMAND("mklink",WALFilePath, xlogFilePath);
SET_RESTORE_COMMAND("mklink", WALFilePath, xlogFilePath);
case RESTORE_COMMAND_COPY:
default:
SET_RESTORE_COMMAND("copy",WALFilePath, xlogFilePath);
SET_RESTORE_COMMAND("copy", WALFilePath, xlogFilePath);
break;
}
}
#else
snprintf(WALFilePath, MAXPGPATH, "%s/%s", archiveLocation, nextWALFileName);
switch (restoreCommandType)
{
case RESTORE_COMMAND_LINK:
#if HAVE_WORKING_LINK
SET_RESTORE_COMMAND("ln -s -f",WALFilePath, xlogFilePath);
SET_RESTORE_COMMAND("ln -s -f", WALFilePath, xlogFilePath);
break;
#endif
case RESTORE_COMMAND_COPY:
default:
SET_RESTORE_COMMAND("cp",WALFilePath, xlogFilePath);
SET_RESTORE_COMMAND("cp", WALFilePath, xlogFilePath);
break;
}
}
#endif
/*
* This code assumes that archiveLocation is a directory
* You may wish to add code to check for tape libraries, etc..
* So, since it is a directory, we use stat to test if its accessible
* This code assumes that archiveLocation is a directory You may wish to
* add code to check for tape libraries, etc.. So, since it is a
* directory, we use stat to test if its accessible
*/
if (stat(archiveLocation, &stat_buf) != 0)
{
fprintf(stderr, "pg_standby: archiveLocation \"%s\" does not exist\n", archiveLocation);
fprintf(stderr, "pg_standby: archiveLocation \"%s\" does not exist\n", archiveLocation);
fflush(stderr);
exit(2);
exit(2);
}
}
/*
* CustomizableNextWALFileReady()
*
*
* Is the requested file ready yet?
*/
static bool
static bool
CustomizableNextWALFileReady()
{
if (stat(WALFilePath, &stat_buf) == 0)
{
/*
* If its a backup file, return immediately
* If its a regular file return only if its the right size already
* If its a backup file, return immediately If its a regular file
* return only if its the right size already
*/
if (strlen(nextWALFileName) > 24 &&
strspn(nextWALFileName, "0123456789ABCDEF") == 24 &&
strcmp(nextWALFileName + strlen(nextWALFileName) - strlen(".backup"),
".backup") == 0)
strcmp(nextWALFileName + strlen(nextWALFileName) - strlen(".backup"),
".backup") == 0)
{
nextWALFileType = XLOG_BACKUP_LABEL;
return true;
return true;
}
else
if (stat_buf.st_size == XLOG_SEG_SIZE)
{
else if (stat_buf.st_size == XLOG_SEG_SIZE)
{
#ifdef WIN32
/*
* Windows reports that the file has the right number of bytes
* even though the file is still being copied and cannot be
* opened by pg_standby yet. So we wait for sleeptime secs
* before attempting to restore. If that is not enough, we
* will rely on the retry/holdoff mechanism.
*/
pg_usleep(sleeptime * 1000000L);
/*
* Windows reports that the file has the right number of bytes
* even though the file is still being copied and cannot be opened
* by pg_standby yet. So we wait for sleeptime secs before
* attempting to restore. If that is not enough, we will rely on
* the retry/holdoff mechanism.
*/
pg_usleep(sleeptime * 1000000L);
#endif
nextWALFileType = XLOG_DATA;
return true;
}
nextWALFileType = XLOG_DATA;
return true;
}
/*
* If still too small, wait until it is the correct size
@@ -192,10 +194,10 @@ CustomizableNextWALFileReady()
{
if (debug)
{
fprintf(stderr, "file size greater than expected\n");
fprintf(stderr, "file size greater than expected\n");
fflush(stderr);
}
exit(3);
exit(3);
}
}
@@ -212,35 +214,36 @@ CustomizableCleanupPriorWALFiles(void)
*/
if (nextWALFileType == XLOG_DATA)
{
int rc;
DIR *xldir;
struct dirent *xlde;
int rc;
DIR *xldir;
struct dirent *xlde;
/*
* Assume its OK to keep failing. The failure situation may change over
* time, so we'd rather keep going on the main processing than fail
* because we couldnt clean up yet.
* Assume its OK to keep failing. The failure situation may change
* over time, so we'd rather keep going on the main processing than
* fail because we couldnt clean up yet.
*/
if ((xldir = opendir(archiveLocation)) != NULL)
{
while ((xlde = readdir(xldir)) != NULL)
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
* deciding whether a segment is still needed. This ensures that we
* won't prematurely remove a segment from a parent timeline. We could
* probably be a little more proactive about removing segments of
* non-parent timelines, but that would be a whole lot more
* complicated.
* We ignore the timeline part of the XLOG segment identifiers
* in deciding whether a segment is still needed. This
* ensures that we won't prematurely remove a segment from a
* parent timeline. We could probably be a little more
* proactive about removing segments of non-parent timelines,
* but that would be a whole lot more complicated.
*
* We use the alphanumeric sorting property of the filenames to decide
* which ones are earlier than the exclusiveCleanupFileName file.
* Note that this means files are not removed in the order they were
* originally written, in case this worries you.
* We use the alphanumeric sorting property of the filenames
* to decide which ones are earlier than the
* exclusiveCleanupFileName file. Note that this means files
* are not removed in the order they were originally written,
* in case this worries you.
*/
if (strlen(xlde->d_name) == XLOG_DATA_FNAME_LEN &&
strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
{
#ifdef WIN32
snprintf(WALFilePath, MAXPGPATH, "%s\\%s", archiveLocation, xlde->d_name);
@@ -249,7 +252,7 @@ CustomizableCleanupPriorWALFiles(void)
#endif
if (debug)
fprintf(stderr, "\nremoving \"%s\"", WALFilePath);
fprintf(stderr, "\nremoving \"%s\"", WALFilePath);
rc = unlink(WALFilePath);
if (rc != 0)
@@ -264,7 +267,7 @@ CustomizableCleanupPriorWALFiles(void)
fprintf(stderr, "\n");
}
else
fprintf(stderr, "pg_standby: archiveLocation \"%s\" open error\n", archiveLocation);
fprintf(stderr, "pg_standby: archiveLocation \"%s\" open error\n", archiveLocation);
closedir(xldir);
fflush(stderr);
@@ -278,19 +281,19 @@ CustomizableCleanupPriorWALFiles(void)
/*
* SetWALFileNameForCleanup()
*
*
* Set the earliest WAL filename that we want to keep on the archive
* and decide whether we need_cleanup
* and decide whether we need_cleanup
*/
static bool
SetWALFileNameForCleanup(void)
{
uint32 tli = 1,
log = 0,
seg = 0;
uint32 log_diff = 0,
seg_diff = 0;
bool cleanup = false;
uint32 tli = 1,
log = 0,
seg = 0;
uint32 log_diff = 0,
seg_diff = 0;
bool cleanup = false;
if (restartWALFileName)
{
@@ -305,7 +308,7 @@ SetWALFileNameForCleanup(void)
{
log_diff = keepfiles / MaxSegmentsPerLogFile;
seg_diff = keepfiles % MaxSegmentsPerLogFile;
if (seg_diff > seg)
if (seg_diff > seg)
{
log_diff++;
seg = MaxSegmentsPerLogFile - seg_diff;
@@ -333,31 +336,30 @@ SetWALFileNameForCleanup(void)
/*
* CheckForExternalTrigger()
*
*
* Is there a trigger file?
*/
static bool
static bool
CheckForExternalTrigger(void)
{
int rc;
int rc;
/*
* Look for a trigger file, if that option has been selected
* Look for a trigger file, if that option has been selected
*
* We use stat() here because triggerPath is always a file
* rather than potentially being in an archive
* We use stat() here because triggerPath is always a file rather than
* potentially being in an archive
*/
if (triggerPath && stat(triggerPath, &stat_buf) == 0)
{
fprintf(stderr, "trigger file found\n");
fprintf(stderr, "trigger file found\n");
fflush(stderr);
/*
* If trigger file found, we *must* delete it. Here's why:
* When recovery completes, we will be asked again
* for the same file from the archive using pg_standby
* so must remove trigger file so we can reload file again
* and come up correctly.
* If trigger file found, we *must* delete it. Here's why: When
* recovery completes, we will be asked again for the same file from
* the archive using pg_standby so must remove trigger file so we can
* reload file again and come up correctly.
*/
rc = unlink(triggerPath);
if (rc != 0)
@@ -374,14 +376,14 @@ CheckForExternalTrigger(void)
/*
* RestoreWALFileForRecovery()
*
*
* Perform the action required to restore the file from archive
*/
static bool
RestoreWALFileForRecovery(void)
{
int rc = 0;
int numretries = 0;
int rc = 0;
int numretries = 0;
if (debug)
{
@@ -401,7 +403,7 @@ RestoreWALFileForRecovery(void)
}
return true;
}
pg_usleep(numretries++ * sleeptime * 1000000L);
pg_usleep(numretries++ * sleeptime * 1000000L);
}
/*
@@ -441,13 +443,13 @@ sighandler(int sig)
}
/*------------ MAIN ----------------------------------------*/
int
int
main(int argc, char **argv)
{
int c;
(void) signal(SIGINT, sighandler);
(void) signal(SIGQUIT, sighandler);
(void) signal(SIGINT, sighandler);
(void) signal(SIGQUIT, sighandler);
while ((c = getopt(argc, argv, "cdk:lr:s:t:w:")) != -1)
{
@@ -492,8 +494,8 @@ main(int argc, char **argv)
case 't': /* Trigger file */
triggerPath = optarg;
if (CheckForExternalTrigger())
exit(1); /* Normal exit, with non-zero */
break;
exit(1); /* Normal exit, with non-zero */
break;
case 'w': /* Max wait time */
maxwaittime = atoi(optarg);
if (maxwaittime < 0)
@@ -510,7 +512,7 @@ main(int argc, char **argv)
}
}
/*
/*
* Parameter checking - after checking to see if trigger file present
*/
if (argc == 1)
@@ -521,8 +523,8 @@ main(int argc, char **argv)
/*
* We will go to the archiveLocation to get nextWALFileName.
* nextWALFileName may not exist yet, which would not be an error,
* so we separate the archiveLocation and nextWALFileName so we can check
* nextWALFileName may not exist yet, which would not be an error, so we
* separate the archiveLocation and nextWALFileName so we can check
* separately whether archiveLocation exists, if not that is an error
*/
if (optind < argc)
@@ -532,7 +534,7 @@ main(int argc, char **argv)
}
else
{
fprintf(stderr, "pg_standby: must specify archiveLocation\n");
fprintf(stderr, "pg_standby: must specify archiveLocation\n");
usage();
exit(2);
}
@@ -544,7 +546,7 @@ main(int argc, char **argv)
}
else
{
fprintf(stderr, "pg_standby: use %%f to specify nextWALFileName\n");
fprintf(stderr, "pg_standby: use %%f to specify nextWALFileName\n");
usage();
exit(2);
}
@@ -556,7 +558,7 @@ main(int argc, char **argv)
}
else
{
fprintf(stderr, "pg_standby: use %%p to specify xlogFilePath\n");
fprintf(stderr, "pg_standby: use %%p to specify xlogFilePath\n");
usage();
exit(2);
}
@@ -573,14 +575,14 @@ main(int argc, char **argv)
if (debug)
{
fprintf(stderr, "\nTrigger file : %s", triggerPath ? triggerPath : "<not set>");
fprintf(stderr, "\nWaiting for WAL file : %s", nextWALFileName);
fprintf(stderr, "\nWAL file path : %s", WALFilePath);
fprintf(stderr, "\nRestoring to... : %s", xlogFilePath);
fprintf(stderr, "\nSleep interval : %d second%s",
sleeptime, (sleeptime > 1 ? "s" : " "));
fprintf(stderr, "\nMax wait interval : %d %s",
maxwaittime, (maxwaittime > 0 ? "seconds" : "forever"));
fprintf(stderr, "\nTrigger file : %s", triggerPath ? triggerPath : "<not set>");
fprintf(stderr, "\nWaiting for WAL file : %s", nextWALFileName);
fprintf(stderr, "\nWAL file path : %s", WALFilePath);
fprintf(stderr, "\nRestoring to... : %s", xlogFilePath);
fprintf(stderr, "\nSleep interval : %d second%s",
sleeptime, (sleeptime > 1 ? "s" : " "));
fprintf(stderr, "\nMax wait interval : %d %s",
maxwaittime, (maxwaittime > 0 ? "seconds" : "forever"));
fprintf(stderr, "\nCommand for restore : %s", restoreCommand);
fprintf(stderr, "\nKeep archive history : %s and later", exclusiveCleanupFileName);
fflush(stderr);
@@ -609,20 +611,20 @@ main(int argc, char **argv)
}
}
/*
/*
* Main wait loop
*/
while (!CustomizableNextWALFileReady() && !triggered)
{
if (sleeptime <= 60)
pg_usleep(sleeptime * 1000000L);
pg_usleep(sleeptime * 1000000L);
if (signaled)
{
triggered = true;
if (debug)
{
fprintf(stderr, "\nsignaled to exit\n");
fprintf(stderr, "\nsignaled to exit\n");
fflush(stderr);
}
}
@@ -631,36 +633,34 @@ main(int argc, char **argv)
if (debug)
{
fprintf(stderr, "\nWAL file not present yet.");
fprintf(stderr, "\nWAL file not present yet.");
if (triggerPath)
fprintf(stderr, " Checking for trigger file...");
fprintf(stderr, " Checking for trigger file...");
fflush(stderr);
}
waittime += sleeptime;
if (!triggered && (CheckForExternalTrigger() || (waittime >= maxwaittime && maxwaittime > 0)))
{
triggered = true;
if (debug && waittime >= maxwaittime && maxwaittime > 0)
fprintf(stderr, "\nTimed out after %d seconds\n",waittime);
fprintf(stderr, "\nTimed out after %d seconds\n", waittime);
}
}
}
/*
* Action on exit
/*
* Action on exit
*/
if (triggered)
exit(1); /* Normal exit, with non-zero */
exit(1); /* Normal exit, with non-zero */
/*
* Once we have restored this file successfully we
* can remove some prior WAL files.
* If this restore fails we musn't remove any
* file because some of them will be requested again
* immediately after the failed restore, or when
* we restart recovery.
/*
* Once we have restored this file successfully we can remove some prior
* WAL files. If this restore fails we musn't remove any file because some
* of them will be requested again immediately after the failed restore,
* or when we restart recovery.
*/
if (RestoreWALFileForRecovery() && need_cleanup)
CustomizableCleanupPriorWALFiles();

contrib/pg_trgm/trgm_gin.c

@@ -16,23 +16,23 @@ Datum gin_trgm_consistent(PG_FUNCTION_ARGS);
Datum
gin_extract_trgm(PG_FUNCTION_ARGS)
{
text *val = (text *) PG_GETARG_TEXT_P(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
TRGM *trg;
text *val = (text *) PG_GETARG_TEXT_P(0);
int32 *nentries = (int32 *) PG_GETARG_POINTER(1);
Datum *entries = NULL;
TRGM *trg;
int4 trglen;
*nentries = 0;
trg = generate_trgm(VARDATA(val), VARSIZE(val) - VARHDRSZ);
trglen = ARRNELEM(trg);
if (trglen > 0)
{
trgm *ptr;
int4 i = 0,
item;
trgm *ptr;
int4 i = 0,
item;
*nentries = (int32) trglen;
entries = (Datum *) palloc(sizeof(Datum) * trglen);
@@ -41,7 +41,7 @@ gin_extract_trgm(PG_FUNCTION_ARGS)
{
item = TRGMINT(ptr);
entries[i++] = Int32GetDatum(item);
ptr++;
}
}
@@ -52,20 +52,20 @@ gin_extract_trgm(PG_FUNCTION_ARGS)
Datum
gin_trgm_consistent(PG_FUNCTION_ARGS)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
text *query = (text *) PG_GETARG_TEXT_P(2);
bool *check = (bool *) PG_GETARG_POINTER(0);
text *query = (text *) PG_GETARG_TEXT_P(2);
bool res = FALSE;
TRGM *trg;
TRGM *trg;
int4 i,
trglen,
ntrue = 0;
trg = generate_trgm(VARDATA(query), VARSIZE(query) - VARHDRSZ);
trglen = ARRNELEM(trg);
for (i = 0; i < trglen; i++)
if (check[i])
ntrue ++;
ntrue++;
#ifdef DIVUNION
res = (trglen == ntrue) ? true : ((((((float4) ntrue) / ((float4) (trglen - ntrue)))) >= trgm_limit) ? true : false);

contrib/pgbench/pgbench.c

@@ -1,5 +1,5 @@
/*
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.73 2007/10/22 10:40:47 mha Exp $
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.74 2007/11/15 21:14:31 momjian Exp $
*
* pgbench: a simple benchmark program for PostgreSQL
* written by Tatsuo Ishii
@@ -53,9 +53,9 @@ extern int optind;
/* max number of clients allowed */
#ifdef FD_SETSIZE
#define MAXCLIENTS (FD_SETSIZE - 10)
#define MAXCLIENTS (FD_SETSIZE - 10)
#else
#define MAXCLIENTS 1024
#define MAXCLIENTS 1024
#endif
int nclients = 1; /* default number of simulated clients */
@@ -201,7 +201,7 @@ getrand(int min, int max)
/* call PQexec() and exit() on failure */
static void
executeStatement(PGconn *con, const char* sql)
executeStatement(PGconn *con, const char *sql)
{
PGresult *res;
@@ -262,7 +262,7 @@ discard_response(CState * state)
/* check to see if the SQL result was good */
static int
check(CState *state, PGresult *res, int n)
check(CState * state, PGresult *res, int n)
{
CState *st = &state[n];
@@ -275,7 +275,7 @@ check(CState *state, PGresult *res, int n)
default:
fprintf(stderr, "Client %d aborted in state %d: %s",
n, st->state, PQerrorMessage(st->con));
remains--; /* I've aborted */
remains--; /* I've aborted */
PQfinish(st->con);
st->con = NULL;
return (-1);
@@ -452,12 +452,12 @@ top:
if (st->sleeping)
{ /* are we sleeping? */
int usec;
struct timeval now;
int usec;
struct timeval now;
gettimeofday(&now, NULL);
usec = (st->until.tv_sec - now.tv_sec) * 1000000 +
st->until.tv_usec - now.tv_usec;
st->until.tv_usec - now.tv_usec;
if (usec <= 0)
st->sleeping = 0; /* Done sleeping, go ahead with next command */
else
@@ -798,11 +798,11 @@ init(void)
"drop table if exists accounts",
"create table accounts(aid int not null,bid int,abalance int,filler char(84)) with (fillfactor=%d)",
"drop table if exists history",
"create table history(tid int,bid int,aid int,delta int,mtime timestamp,filler char(22))"};
"create table history(tid int,bid int,aid int,delta int,mtime timestamp,filler char(22))"};
static char *DDLAFTERs[] = {
"alter table branches add primary key (bid)",
"alter table tellers add primary key (tid)",
"alter table accounts add primary key (aid)"};
"alter table accounts add primary key (aid)"};
char sql[256];
@@ -821,7 +821,8 @@ init(void)
(strstr(DDLs[i], "create table tellers") == DDLs[i]) ||
(strstr(DDLs[i], "create table accounts") == DDLs[i]))
{
char ddl_stmt[128];
char ddl_stmt[128];
snprintf(ddl_stmt, 128, DDLs[i], fillfactor);
executeStatement(con, ddl_stmt);
continue;
@@ -990,7 +991,7 @@ process_commands(char *buf)
pg_strcasecmp(my_commands->argv[2], "ms") != 0 &&
pg_strcasecmp(my_commands->argv[2], "s"))
{
fprintf(stderr, "%s: unknown time unit '%s' - must be us, ms or s\n",
fprintf(stderr, "%s: unknown time unit '%s' - must be us, ms or s\n",
my_commands->argv[0], my_commands->argv[2]);
return NULL;
}
@@ -1204,7 +1205,7 @@ main(int argc, char **argv)
int c;
int is_init_mode = 0; /* initialize mode? */
int is_no_vacuum = 0; /* no vacuum at all before testing? */
int do_vacuum_accounts = 0; /* do vacuum accounts before testing? */
int do_vacuum_accounts = 0; /* do vacuum accounts before testing? */
int debug = 0; /* debug flag */
int ttype = 0; /* transaction type. 0: TPC-B, 1: SELECT only,
* 2: skip update of branches and tellers */
@@ -1308,7 +1309,7 @@ main(int argc, char **argv)
fprintf(stderr, "Use limit/ulimit to increase the limit before using pgbench.\n");
exit(1);
}
#endif /* HAVE_GETRLIMIT */
#endif /* HAVE_GETRLIMIT */
break;
case 'C':
is_connect = 1;
@@ -1615,8 +1616,8 @@ main(int argc, char **argv)
if (state[i].sleeping)
{
int this_usec;
int sock = PQsocket(state[i].con);
int this_usec;
int sock = PQsocket(state[i].con);
if (min_usec < 0)
{
@@ -1625,7 +1626,7 @@ main(int argc, char **argv)
}
this_usec = (state[i].until.tv_sec - now.tv_sec) * 1000000 +
state[i].until.tv_usec - now.tv_usec;
state[i].until.tv_usec - now.tv_usec;
if (this_usec > 0 && (min_usec == 0 || this_usec < min_usec))
min_usec = this_usec;
@@ -1657,11 +1658,11 @@ main(int argc, char **argv)
timeout.tv_usec = min_usec % 1000000;
nsocks = select(maxsock + 1, &input_mask, (fd_set *) NULL,
(fd_set *) NULL, &timeout);
(fd_set *) NULL, &timeout);
}
else
nsocks = select(maxsock + 1, &input_mask, (fd_set *) NULL,
(fd_set *) NULL, (struct timeval *) NULL);
(fd_set *) NULL, (struct timeval *) NULL);
if (nsocks < 0)
{
if (errno == EINTR)

contrib/pgcrypto/blf.c

@@ -1,7 +1,7 @@
/*
* Butchered version of sshblowf.c from putty-0.59.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/blf.c,v 1.8 2007/03/28 22:48:58 neilc Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/blf.c,v 1.9 2007/11/15 21:14:31 momjian Exp $
*/
/*
@@ -251,7 +251,7 @@ static const uint32 sbox3[] = {
static void
blowfish_encrypt(uint32 xL, uint32 xR, uint32 *output,
BlowfishContext *ctx)
BlowfishContext * ctx)
{
uint32 *S0 = ctx->S0;
uint32 *S1 = ctx->S1;
@@ -285,7 +285,7 @@ blowfish_encrypt(uint32 xL, uint32 xR, uint32 *output,
static void
blowfish_decrypt(uint32 xL, uint32 xR, uint32 *output,
BlowfishContext *ctx)
BlowfishContext * ctx)
{
uint32 *S0 = ctx->S0;
uint32 *S1 = ctx->S1;
@@ -318,7 +318,7 @@ blowfish_decrypt(uint32 xL, uint32 xR, uint32 *output,
}
void
blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -351,7 +351,7 @@ blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
}
void
blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -384,7 +384,7 @@ blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx)
}
void
blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -405,7 +405,7 @@ blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
}
void
blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx)
{
uint32 xL,
xR,
@@ -426,7 +426,7 @@ blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx)
}
void
blowfish_setkey(BlowfishContext *ctx,
blowfish_setkey(BlowfishContext * ctx,
const uint8 *key, short keybytes)
{
uint32 *S0 = ctx->S0;
@@ -437,7 +437,7 @@ blowfish_setkey(BlowfishContext *ctx,
uint32 str[2];
int i;
Assert(keybytes > 0 && keybytes <= (448/8));
Assert(keybytes > 0 && keybytes <= (448 / 8));
for (i = 0; i < 18; i++)
{
@@ -492,9 +492,8 @@ blowfish_setkey(BlowfishContext *ctx,
}
void
blowfish_setiv(BlowfishContext *ctx, const uint8 *iv)
blowfish_setiv(BlowfishContext * ctx, const uint8 *iv)
{
ctx->iv0 = GET_32BIT_MSB_FIRST(iv);
ctx->iv1 = GET_32BIT_MSB_FIRST(iv + 4);
}

contrib/pgcrypto/blf.h

@@ -1,4 +1,4 @@
/* $PostgreSQL: pgsql/contrib/pgcrypto/blf.h,v 1.6 2007/03/28 22:48:58 neilc Exp $ */
/* $PostgreSQL: pgsql/contrib/pgcrypto/blf.h,v 1.7 2007/11/15 21:14:31 momjian Exp $ */
/*
* PuTTY is copyright 1997-2007 Simon Tatham.
*
@@ -35,14 +35,12 @@ typedef struct
S3[256],
P[18];
uint32 iv0,
iv1; /* for CBC mode */
} BlowfishContext;
void blowfish_setkey(BlowfishContext *ctx, const uint8 *key, short keybytes);
void blowfish_setiv(BlowfishContext *ctx, const uint8 *iv);
void blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx);
void blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext *ctx);
void blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx);
void blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext *ctx);
iv1; /* for CBC mode */
} BlowfishContext;
void blowfish_setkey(BlowfishContext * ctx, const uint8 *key, short keybytes);
void blowfish_setiv(BlowfishContext * ctx, const uint8 *iv);
void blowfish_encrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx);
void blowfish_decrypt_cbc(uint8 *blk, int len, BlowfishContext * ctx);
void blowfish_encrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx);
void blowfish_decrypt_ecb(uint8 *blk, int len, BlowfishContext * ctx);


@ -1,5 +1,5 @@
/*
* $PostgreSQL: pgsql/contrib/pgcrypto/crypt-blowfish.c,v 1.12 2007/04/06 05:36:50 tgl Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/crypt-blowfish.c,v 1.13 2007/11/15 21:14:31 momjian Exp $
*
* This code comes from John the Ripper password cracker, with reentrant
* and crypt(3) interfaces added, but optimizations specific to password
@ -436,7 +436,7 @@ BF_encode(char *dst, const BF_word * src, int size)
}
static void
BF_swap(BF_word *x, int count)
BF_swap(BF_word * x, int count)
{
/* Swap on little-endian hardware, else do nothing */
#ifndef WORDS_BIGENDIAN
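
BF_swap() above byte-reverses words only on little-endian builds, so the in-memory words match their big-endian interpretation. A self-contained per-word version of that swap (illustrative; the real function works in place over an array):

#include <stdint.h>

static uint32_t
swap32(uint32_t x)
{
    return (x << 24) |
           ((x & 0x0000ff00UL) << 8) |
           ((x & 0x00ff0000UL) >> 8) |
           (x >> 24);
}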


@ -26,7 +26,7 @@
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/* $PostgreSQL: pgsql/contrib/pgcrypto/imath.h,v 1.5 2006/10/04 00:29:46 momjian Exp $ */
/* $PostgreSQL: pgsql/contrib/pgcrypto/imath.h,v 1.6 2007/11/15 21:14:31 momjian Exp $ */
#ifndef IMATH_H_
#define IMATH_H_
@ -115,11 +115,12 @@ mp_result mp_int_mul(mp_int a, mp_int b, mp_int c); /* c = a * b */
mp_result mp_int_mul_value(mp_int a, int value, mp_int c);
mp_result mp_int_mul_pow2(mp_int a, int p2, mp_int c);
mp_result mp_int_sqr(mp_int a, mp_int c); /* c = a * a */
mp_result
mp_int_div(mp_int a, mp_int b, /* q = a / b */
mp_int_div(mp_int a, mp_int b, /* q = a / b */
mp_int q, mp_int r); /* r = a % b */
mp_result
mp_int_div_value(mp_int a, int value, /* q = a / value */
mp_int_div_value(mp_int a, int value, /* q = a / value */
mp_int q, int *r); /* r = a % value */
mp_result
mp_int_div_pow2(mp_int a, int p2, /* q = a / 2^p2 */


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/internal.c,v 1.26 2007/03/28 22:48:58 neilc Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/internal.c,v 1.27 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@ -251,7 +251,7 @@ struct int_ctx
uint8 iv[INT_MAX_IV];
union
{
BlowfishContext bf;
BlowfishContext bf;
rijndael_ctx rj;
} ctx;
unsigned keylen;
@ -426,7 +426,7 @@ bf_block_size(PX_Cipher * c)
static unsigned
bf_key_size(PX_Cipher * c)
{
return 448/8;
return 448 / 8;
}
static unsigned


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/openssl.c,v 1.31 2007/09/29 02:18:15 tgl Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/openssl.c,v 1.32 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@ -98,10 +98,13 @@ static void
AES_cbc_encrypt(const uint8 *src, uint8 *dst, int len, AES_KEY *ctx, uint8 *iv, int enc)
{
memcpy(dst, src, len);
if (enc) {
if (enc)
{
aes_cbc_encrypt(ctx, iv, dst, len);
memcpy(iv, dst + len - 16, 16);
} else {
}
else
{
aes_cbc_decrypt(ctx, iv, dst, len);
memcpy(iv, src + len - 16, 16);
}
@ -394,26 +397,27 @@ static int
bf_check_supported_key_len(void)
{
static const uint8 key[56] = {
0xf0,0xe1,0xd2,0xc3,0xb4,0xa5,0x96,0x87,0x78,0x69,
0x5a,0x4b,0x3c,0x2d,0x1e,0x0f,0x00,0x11,0x22,0x33,
0x44,0x55,0x66,0x77,0x04,0x68,0x91,0x04,0xc2,0xfd,
0x3b,0x2f,0x58,0x40,0x23,0x64,0x1a,0xba,0x61,0x76,
0x1f,0x1f,0x1f,0x1f,0x0e,0x0e,0x0e,0x0e,0xff,0xff,
0xff,0xff,0xff,0xff,0xff,0xff
0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, 0x78, 0x69,
0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f, 0x00, 0x11, 0x22, 0x33,
0x44, 0x55, 0x66, 0x77, 0x04, 0x68, 0x91, 0x04, 0xc2, 0xfd,
0x3b, 0x2f, 0x58, 0x40, 0x23, 0x64, 0x1a, 0xba, 0x61, 0x76,
0x1f, 0x1f, 0x1f, 0x1f, 0x0e, 0x0e, 0x0e, 0x0e, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const uint8 data[8] = {0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10};
static const uint8 res[8] = {0xc0,0x45,0x04,0x01,0x2e,0x4e,0x1f,0x53};
static const uint8 data[8] = {0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10};
static const uint8 res[8] = {0xc0, 0x45, 0x04, 0x01, 0x2e, 0x4e, 0x1f, 0x53};
static uint8 out[8];
BF_KEY bf_key;
BF_KEY bf_key;
/* encrypt with 448bits key and verify output */
BF_set_key(&bf_key, 56, key);
BF_ecb_encrypt(data, out, &bf_key, BF_ENCRYPT);
if (memcmp(out, res, 8) != 0)
return 0; /* Output does not match -> strong cipher is not supported */
if (memcmp(out, res, 8) != 0)
return 0; /* Output does not match -> strong cipher is
* not supported */
return 1;
}
@ -421,18 +425,19 @@ static int
bf_init(PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv)
{
ossldata *od = c->ptr;
static int bf_is_strong = -1;
static int bf_is_strong = -1;
/*
* Test if key len is supported. BF_set_key silently cut large keys and it could be
* be a problem when user transfer crypted data from one server to another.
* Test if key len is supported. BF_set_key silently cut large keys and it
* could be be a problem when user transfer crypted data from one server
* to another.
*/
if( bf_is_strong == -1)
if (bf_is_strong == -1)
bf_is_strong = bf_check_supported_key_len();
if( !bf_is_strong && klen>16 )
return PXE_KEY_TOO_BIG;
if (!bf_is_strong && klen > 16)
return PXE_KEY_TOO_BIG;
/* Key len is supported. We can use it. */
BF_set_key(&od->u.bf.key, klen, key);
@ -750,13 +755,14 @@ ossl_aes_init(PX_Cipher * c, const uint8 *key, unsigned klen, const uint8 *iv)
static int
ossl_aes_key_init(ossldata * od, int type)
{
int err;
int err;
/*
* Strong key support could be missing on some openssl installations.
* We must check return value from set key function.
*/
* Strong key support could be missing on some openssl installations. We
* must check return value from set key function.
*/
if (type == AES_ENCRYPT)
err = AES_set_encrypt_key(od->key, od->klen * 8, &od->u.aes_key);
err = AES_set_encrypt_key(od->key, od->klen * 8, &od->u.aes_key);
else
err = AES_set_decrypt_key(od->key, od->klen * 8, &od->u.aes_key);
@ -776,7 +782,7 @@ ossl_aes_ecb_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
unsigned bs = gen_ossl_block_size(c);
ossldata *od = c->ptr;
const uint8 *end = data + dlen - bs;
int err;
int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_ENCRYPT)) != 0)
@ -794,7 +800,7 @@ ossl_aes_ecb_decrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
unsigned bs = gen_ossl_block_size(c);
ossldata *od = c->ptr;
const uint8 *end = data + dlen - bs;
int err;
int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_DECRYPT)) != 0)
@ -810,12 +816,12 @@ ossl_aes_cbc_encrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
uint8 *res)
{
ossldata *od = c->ptr;
int err;
int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_ENCRYPT)) != 0)
return err;
AES_cbc_encrypt(data, res, dlen, &od->u.aes_key, od->iv, AES_ENCRYPT);
return 0;
}
@ -825,7 +831,7 @@ ossl_aes_cbc_decrypt(PX_Cipher * c, const uint8 *data, unsigned dlen,
uint8 *res)
{
ossldata *od = c->ptr;
int err;
int err;
if (!od->init)
if ((err = ossl_aes_key_init(od, AES_DECRYPT)) != 0)


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-compress.c,v 1.6 2007/01/14 20:55:14 alvherre Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-compress.c,v 1.7 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@ -312,7 +312,6 @@ pgp_decompress_filter(PullFilter ** res, PGP_Context * ctx, PullFilter * src)
{
return pullf_create(res, &decompress_filter, ctx, src);
}
#else /* !HAVE_ZLIB */
int


@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/px.c,v 1.16 2007/08/23 16:15:51 tgl Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/px.c,v 1.17 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@ -286,7 +286,7 @@ combo_decrypt(PX_Combo * cx, const uint8 *data, unsigned dlen,
/* with padding, empty ciphertext is not allowed */
if (cx->padding)
return PXE_DECRYPT_FAILED;
/* without padding, report empty result */
*rlen = 0;
return 0;


@ -33,7 +33,7 @@
*
* $From: sha2.c,v 1.1 2001/11/08 00:01:51 adg Exp adg $
*
* $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.9 2007/04/06 05:36:50 tgl Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.10 2007/11/15 21:14:31 momjian Exp $
*/
#include "postgres.h"
@ -78,7 +78,7 @@
(x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
((tmp & 0x0000ffff0000ffffULL) << 16); \
}
#endif /* not bigendian */
#endif /* not bigendian */
/*
* Macro for incrementally adding the unsigned 64-bit integer n to the


@ -159,16 +159,17 @@ pgstatindex(PG_FUNCTION_ARGS)
else if (P_ISLEAF(opaque))
{
int max_avail;
max_avail = BLCKSZ - (BLCKSZ - ((PageHeader)page)->pd_special + SizeOfPageHeaderData);
int max_avail;
max_avail = BLCKSZ - (BLCKSZ - ((PageHeader) page)->pd_special + SizeOfPageHeaderData);
indexStat.max_avail += max_avail;
indexStat.free_space += PageGetFreeSpace(page);
indexStat.leaf_pages++;
/*
* If the next leaf is on an earlier block, it
* means a fragmentation.
* If the next leaf is on an earlier block, it means a
* fragmentation.
*/
if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno)
indexStat.fragments++;
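
A worked example for the max_avail expression above, which algebraically reduces to pd_special - SizeOfPageHeaderData (the bytes usable for line pointers and tuples): assuming an 8192-byte block, a 24-byte page header, and pd_special = 8176 (a 16-byte btree special area), max_avail = 8192 - (8192 - 8176 + 24) = 8152. These sizes are typical for this era but illustrative here.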


@ -552,8 +552,8 @@ crosstab(PG_FUNCTION_ARGS)
xpstrdup(values[0], rowid);
/*
* Check to see if the rowid is the same as that of the last
* tuple sent -- if so, skip this tuple entirely
* Check to see if the rowid is the same as that of the
* last tuple sent -- if so, skip this tuple entirely
*/
if (!firstpass && xstreq(lastrowid, rowid))
{
@ -563,8 +563,8 @@ crosstab(PG_FUNCTION_ARGS)
}
/*
* If rowid hasn't changed on us, continue building the
* ouput tuple.
* If rowid hasn't changed on us, continue building the ouput
* tuple.
*/
if (xstreq(rowid, values[0]))
{


@ -6,7 +6,7 @@
* Copyright (c) 2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/contrib/test_parser/test_parser.c,v 1.1 2007/10/15 21:36:50 tgl Exp $
* $PostgreSQL: pgsql/contrib/test_parser/test_parser.c,v 1.2 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -22,40 +22,44 @@ PG_MODULE_MAGIC;
*/
/* self-defined type */
typedef struct {
char * buffer; /* text to parse */
int len; /* length of the text in buffer */
int pos; /* position of the parser */
} ParserState;
typedef struct
{
char *buffer; /* text to parse */
int len; /* length of the text in buffer */
int pos; /* position of the parser */
} ParserState;
/* copy-paste from wparser.h of tsearch2 */
typedef struct {
int lexid;
char *alias;
char *descr;
} LexDescr;
typedef struct
{
int lexid;
char *alias;
char *descr;
} LexDescr;
/*
* prototypes
*/
PG_FUNCTION_INFO_V1(testprs_start);
Datum testprs_start(PG_FUNCTION_ARGS);
Datum testprs_start(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(testprs_getlexeme);
Datum testprs_getlexeme(PG_FUNCTION_ARGS);
Datum testprs_getlexeme(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(testprs_end);
Datum testprs_end(PG_FUNCTION_ARGS);
Datum testprs_end(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(testprs_lextype);
Datum testprs_lextype(PG_FUNCTION_ARGS);
Datum testprs_lextype(PG_FUNCTION_ARGS);
/*
* functions
*/
Datum testprs_start(PG_FUNCTION_ARGS)
Datum
testprs_start(PG_FUNCTION_ARGS)
{
ParserState *pst = (ParserState *) palloc0(sizeof(ParserState));
pst->buffer = (char *) PG_GETARG_POINTER(0);
pst->len = PG_GETARG_INT32(1);
pst->pos = 0;
@ -63,15 +67,16 @@ Datum testprs_start(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(pst);
}
Datum testprs_getlexeme(PG_FUNCTION_ARGS)
Datum
testprs_getlexeme(PG_FUNCTION_ARGS)
{
ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
char **t = (char **) PG_GETARG_POINTER(1);
int *tlen = (int *) PG_GETARG_POINTER(2);
ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
char **t = (char **) PG_GETARG_POINTER(1);
int *tlen = (int *) PG_GETARG_POINTER(2);
int type;
*tlen = pst->pos;
*t = pst->buffer + pst->pos;
*t = pst->buffer + pst->pos;
if ((pst->buffer)[pst->pos] == ' ')
{
@ -81,7 +86,9 @@ Datum testprs_getlexeme(PG_FUNCTION_ARGS)
while ((pst->buffer)[pst->pos] == ' ' &&
pst->pos < pst->len)
(pst->pos)++;
} else {
}
else
{
/* word type */
type = 3;
/* go to the next white-space character */
@ -94,28 +101,29 @@ Datum testprs_getlexeme(PG_FUNCTION_ARGS)
/* we are finished if (*tlen == 0) */
if (*tlen == 0)
type=0;
type = 0;
PG_RETURN_INT32(type);
}
Datum testprs_end(PG_FUNCTION_ARGS)
Datum
testprs_end(PG_FUNCTION_ARGS)
{
ParserState *pst = (ParserState *) PG_GETARG_POINTER(0);
pfree(pst);
PG_RETURN_VOID();
}
Datum testprs_lextype(PG_FUNCTION_ARGS)
Datum
testprs_lextype(PG_FUNCTION_ARGS)
{
/*
* Remarks:
* - we have to return the blanks for headline reason
* - we use the same lexids like Teodor in the default
* word parser; in this way we can reuse the headline
* function of the default word parser.
* Remarks: - we have to return the blanks for headline reason - we use
* the same lexids like Teodor in the default word parser; in this way we
* can reuse the headline function of the default word parser.
*/
LexDescr *descr = (LexDescr *) palloc(sizeof(LexDescr) * (2+1));
LexDescr *descr = (LexDescr *) palloc(sizeof(LexDescr) * (2 + 1));
/* there are only two types in this parser */
descr[0].lexid = 3;
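
A condensed restatement of the rule testprs_getlexeme() implements above: a lexeme is a maximal run of spaces (a "blank") or a maximal run of non-spaces (a "word", lexid 3), and a zero-length lexeme ends the scan. The helper below lifts that logic out of the fmgr wrapper; the function name and the blank lexid 12 are assumptions, not shown in the hunks:

static int
next_lexeme(ParserState *pst, char **t, int *tlen)
{
    int     type;
    int     start = pst->pos;

    *t = pst->buffer + pst->pos;
    if (pst->pos < pst->len && pst->buffer[pst->pos] == ' ')
    {
        type = 12;              /* blank (assumed lexid) */
        while (pst->pos < pst->len && pst->buffer[pst->pos] == ' ')
            pst->pos++;
    }
    else
    {
        type = 3;               /* word */
        while (pst->pos < pst->len && pst->buffer[pst->pos] != ' ')
            pst->pos++;
    }
    *tlen = pst->pos - start;
    return (*tlen == 0) ? 0 : type;     /* 0 ends the scan */
}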


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/contrib/tsearch2/tsearch2.c,v 1.2 2007/11/13 22:14:50 tgl Exp $
* $PostgreSQL: pgsql/contrib/tsearch2/tsearch2.c,v 1.3 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -24,8 +24,8 @@
PG_MODULE_MAGIC;
static Oid current_dictionary_oid = InvalidOid;
static Oid current_parser_oid = InvalidOid;
static Oid current_dictionary_oid = InvalidOid;
static Oid current_parser_oid = InvalidOid;
/* insert given value at argument position 0 */
#define INSERT_ARGUMENT0(argument, isnull) \
@ -65,27 +65,27 @@ static Oid current_parser_oid = InvalidOid;
} \
PG_FUNCTION_INFO_V1(name)
static Oid GetCurrentDict(void);
static Oid GetCurrentParser(void);
static Oid GetCurrentDict(void);
static Oid GetCurrentParser(void);
Datum tsa_lexize_byname(PG_FUNCTION_ARGS);
Datum tsa_lexize_bycurrent(PG_FUNCTION_ARGS);
Datum tsa_set_curdict(PG_FUNCTION_ARGS);
Datum tsa_set_curdict_byname(PG_FUNCTION_ARGS);
Datum tsa_token_type_current(PG_FUNCTION_ARGS);
Datum tsa_set_curprs(PG_FUNCTION_ARGS);
Datum tsa_set_curprs_byname(PG_FUNCTION_ARGS);
Datum tsa_parse_current(PG_FUNCTION_ARGS);
Datum tsa_set_curcfg(PG_FUNCTION_ARGS);
Datum tsa_set_curcfg_byname(PG_FUNCTION_ARGS);
Datum tsa_to_tsvector_name(PG_FUNCTION_ARGS);
Datum tsa_to_tsquery_name(PG_FUNCTION_ARGS);
Datum tsa_plainto_tsquery_name(PG_FUNCTION_ARGS);
Datum tsa_headline_byname(PG_FUNCTION_ARGS);
Datum tsa_ts_stat(PG_FUNCTION_ARGS);
Datum tsa_tsearch2(PG_FUNCTION_ARGS);
Datum tsa_rewrite_accum(PG_FUNCTION_ARGS);
Datum tsa_rewrite_finish(PG_FUNCTION_ARGS);
Datum tsa_lexize_byname(PG_FUNCTION_ARGS);
Datum tsa_lexize_bycurrent(PG_FUNCTION_ARGS);
Datum tsa_set_curdict(PG_FUNCTION_ARGS);
Datum tsa_set_curdict_byname(PG_FUNCTION_ARGS);
Datum tsa_token_type_current(PG_FUNCTION_ARGS);
Datum tsa_set_curprs(PG_FUNCTION_ARGS);
Datum tsa_set_curprs_byname(PG_FUNCTION_ARGS);
Datum tsa_parse_current(PG_FUNCTION_ARGS);
Datum tsa_set_curcfg(PG_FUNCTION_ARGS);
Datum tsa_set_curcfg_byname(PG_FUNCTION_ARGS);
Datum tsa_to_tsvector_name(PG_FUNCTION_ARGS);
Datum tsa_to_tsquery_name(PG_FUNCTION_ARGS);
Datum tsa_plainto_tsquery_name(PG_FUNCTION_ARGS);
Datum tsa_headline_byname(PG_FUNCTION_ARGS);
Datum tsa_ts_stat(PG_FUNCTION_ARGS);
Datum tsa_tsearch2(PG_FUNCTION_ARGS);
Datum tsa_rewrite_accum(PG_FUNCTION_ARGS);
Datum tsa_rewrite_finish(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(tsa_lexize_byname);
PG_FUNCTION_INFO_V1(tsa_lexize_bycurrent);
@ -150,11 +150,11 @@ UNSUPPORTED_FUNCTION(tsa_get_covers);
Datum
tsa_lexize_byname(PG_FUNCTION_ARGS)
{
text *dictname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
text *dictname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
return DirectFunctionCall2(ts_lexize,
ObjectIdGetDatum(TextGetObjectId(regdictionaryin, dictname)),
ObjectIdGetDatum(TextGetObjectId(regdictionaryin, dictname)),
arg1);
}
@ -162,8 +162,8 @@ tsa_lexize_byname(PG_FUNCTION_ARGS)
Datum
tsa_lexize_bycurrent(PG_FUNCTION_ARGS)
{
Datum arg0 = PG_GETARG_DATUM(0);
Oid id = GetCurrentDict();
Datum arg0 = PG_GETARG_DATUM(0);
Oid id = GetCurrentDict();
return DirectFunctionCall2(ts_lexize,
ObjectIdGetDatum(id),
@ -174,7 +174,7 @@ tsa_lexize_bycurrent(PG_FUNCTION_ARGS)
Datum
tsa_set_curdict(PG_FUNCTION_ARGS)
{
Oid dict_oid = PG_GETARG_OID(0);
Oid dict_oid = PG_GETARG_OID(0);
if (!SearchSysCacheExists(TSDICTOID,
ObjectIdGetDatum(dict_oid),
@ -191,8 +191,8 @@ tsa_set_curdict(PG_FUNCTION_ARGS)
Datum
tsa_set_curdict_byname(PG_FUNCTION_ARGS)
{
text *name = PG_GETARG_TEXT_P(0);
Oid dict_oid;
text *name = PG_GETARG_TEXT_P(0);
Oid dict_oid;
dict_oid = TSDictionaryGetDictid(stringToQualifiedNameList(TextPGetCString(name)), false);
@ -213,7 +213,7 @@ tsa_token_type_current(PG_FUNCTION_ARGS)
Datum
tsa_set_curprs(PG_FUNCTION_ARGS)
{
Oid parser_oid = PG_GETARG_OID(0);
Oid parser_oid = PG_GETARG_OID(0);
if (!SearchSysCacheExists(TSPARSEROID,
ObjectIdGetDatum(parser_oid),
@ -230,8 +230,8 @@ tsa_set_curprs(PG_FUNCTION_ARGS)
Datum
tsa_set_curprs_byname(PG_FUNCTION_ARGS)
{
text *name = PG_GETARG_TEXT_P(0);
Oid parser_oid;
text *name = PG_GETARG_TEXT_P(0);
Oid parser_oid;
parser_oid = TSParserGetPrsid(stringToQualifiedNameList(TextPGetCString(name)), false);
@ -252,12 +252,12 @@ tsa_parse_current(PG_FUNCTION_ARGS)
Datum
tsa_set_curcfg(PG_FUNCTION_ARGS)
{
Oid arg0 = PG_GETARG_OID(0);
char *name;
Oid arg0 = PG_GETARG_OID(0);
char *name;
name = DatumGetCString(DirectFunctionCall1(regconfigout,
ObjectIdGetDatum(arg0)));
set_config_option("default_text_search_config", name,
PGC_USERSET,
PGC_S_SESSION,
@ -271,8 +271,8 @@ tsa_set_curcfg(PG_FUNCTION_ARGS)
Datum
tsa_set_curcfg_byname(PG_FUNCTION_ARGS)
{
text *arg0 = PG_GETARG_TEXT_P(0);
char *name;
text *arg0 = PG_GETARG_TEXT_P(0);
char *name;
name = TextPGetCString(arg0);
@ -289,9 +289,9 @@ tsa_set_curcfg_byname(PG_FUNCTION_ARGS)
Datum
tsa_to_tsvector_name(PG_FUNCTION_ARGS)
{
text *cfgname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
Oid config_oid;
text *cfgname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
Oid config_oid;
config_oid = TextGetObjectId(regconfigin, cfgname);
@ -303,9 +303,9 @@ tsa_to_tsvector_name(PG_FUNCTION_ARGS)
Datum
tsa_to_tsquery_name(PG_FUNCTION_ARGS)
{
text *cfgname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
Oid config_oid;
text *cfgname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
Oid config_oid;
config_oid = TextGetObjectId(regconfigin, cfgname);
@ -318,9 +318,9 @@ tsa_to_tsquery_name(PG_FUNCTION_ARGS)
Datum
tsa_plainto_tsquery_name(PG_FUNCTION_ARGS)
{
text *cfgname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
Oid config_oid;
text *cfgname = PG_GETARG_TEXT_P(0);
Datum arg1 = PG_GETARG_DATUM(1);
Oid config_oid;
config_oid = TextGetObjectId(regconfigin, cfgname);
@ -332,22 +332,22 @@ tsa_plainto_tsquery_name(PG_FUNCTION_ARGS)
Datum
tsa_headline_byname(PG_FUNCTION_ARGS)
{
Datum arg0 = PG_GETARG_DATUM(0);
Datum arg1 = PG_GETARG_DATUM(1);
Datum arg2 = PG_GETARG_DATUM(2);
Datum result;
Oid config_oid;
Datum arg0 = PG_GETARG_DATUM(0);
Datum arg1 = PG_GETARG_DATUM(1);
Datum arg2 = PG_GETARG_DATUM(2);
Datum result;
Oid config_oid;
/* first parameter has to be converted to oid */
config_oid = DatumGetObjectId(DirectFunctionCall1(regconfigin,
DirectFunctionCall1(textout, arg0)));
DirectFunctionCall1(textout, arg0)));
if (PG_NARGS() == 3)
result = DirectFunctionCall3(ts_headline_byid,
ObjectIdGetDatum(config_oid), arg1, arg2);
ObjectIdGetDatum(config_oid), arg1, arg2);
else
{
Datum arg3 = PG_GETARG_DATUM(3);
Datum arg3 = PG_GETARG_DATUM(3);
result = DirectFunctionCall4(ts_headline_byid_opt,
ObjectIdGetDatum(config_oid),
@ -371,11 +371,11 @@ tsa_tsearch2(PG_FUNCTION_ARGS)
{
TriggerData *trigdata;
Trigger *trigger;
char **tgargs;
char **tgargs;
int i;
/* Check call context */
if (!CALLED_AS_TRIGGER(fcinfo)) /* internal error */
if (!CALLED_AS_TRIGGER(fcinfo)) /* internal error */
elog(ERROR, "tsvector_update_trigger: not fired by trigger manager");
trigdata = (TriggerData *) fcinfo->context;
@ -388,7 +388,7 @@ tsa_tsearch2(PG_FUNCTION_ARGS)
tgargs = (char **) palloc((trigger->tgnargs + 1) * sizeof(char *));
tgargs[0] = trigger->tgargs[0];
for (i = 1; i < trigger->tgnargs; i++)
tgargs[i+1] = trigger->tgargs[i];
tgargs[i + 1] = trigger->tgargs[i];
tgargs[1] = pstrdup(GetConfigOptionByName("default_text_search_config",
NULL));
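
Nearly every wrapper above follows one bridging shape: convert a textual name to an OID through the matching reg* input function, then forward to the OID-based core function via DirectFunctionCall. A minimal sketch mirroring tsa_to_tsvector_name()/tsa_headline_byname() (the wrapper name is illustrative; to_tsvector_byid is the core function it assumes):

Datum
by_name_wrapper_sketch(PG_FUNCTION_ARGS)
{
    Datum   arg0 = PG_GETARG_DATUM(0);  /* configuration name as text */
    Datum   arg1 = PG_GETARG_DATUM(1);  /* document */
    Oid     config_oid;

    /* regconfigin resolves "schema.config" to the configuration's OID */
    config_oid = DatumGetObjectId(DirectFunctionCall1(regconfigin,
                                  DirectFunctionCall1(textout, arg0)));

    return DirectFunctionCall2(to_tsvector_byid,
                               ObjectIdGetDatum(config_oid), arg1);
}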


@ -4,7 +4,7 @@
*
* Copyright (c) 2007 PostgreSQL Global Development Group
*
* $PostgreSQL: pgsql/contrib/uuid-ossp/uuid-ossp.c,v 1.3 2007/10/23 21:38:16 tgl Exp $
* $PostgreSQL: pgsql/contrib/uuid-ossp/uuid-ossp.c,v 1.4 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,17 +39,17 @@
PG_MODULE_MAGIC;
Datum uuid_nil(PG_FUNCTION_ARGS);
Datum uuid_ns_dns(PG_FUNCTION_ARGS);
Datum uuid_ns_url(PG_FUNCTION_ARGS);
Datum uuid_ns_oid(PG_FUNCTION_ARGS);
Datum uuid_ns_x500(PG_FUNCTION_ARGS);
Datum uuid_nil(PG_FUNCTION_ARGS);
Datum uuid_ns_dns(PG_FUNCTION_ARGS);
Datum uuid_ns_url(PG_FUNCTION_ARGS);
Datum uuid_ns_oid(PG_FUNCTION_ARGS);
Datum uuid_ns_x500(PG_FUNCTION_ARGS);
Datum uuid_generate_v1(PG_FUNCTION_ARGS);
Datum uuid_generate_v1mc(PG_FUNCTION_ARGS);
Datum uuid_generate_v3(PG_FUNCTION_ARGS);
Datum uuid_generate_v4(PG_FUNCTION_ARGS);
Datum uuid_generate_v5(PG_FUNCTION_ARGS);
Datum uuid_generate_v1(PG_FUNCTION_ARGS);
Datum uuid_generate_v1mc(PG_FUNCTION_ARGS);
Datum uuid_generate_v3(PG_FUNCTION_ARGS);
Datum uuid_generate_v4(PG_FUNCTION_ARGS);
Datum uuid_generate_v5(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(uuid_nil);
@ -66,11 +66,11 @@ PG_FUNCTION_INFO_V1(uuid_generate_v5);
static char *
uuid_to_string(const uuid_t *uuid)
uuid_to_string(const uuid_t * uuid)
{
char *buf = palloc(UUID_LEN_STR + 1);
void *ptr = buf;
size_t len = UUID_LEN_STR + 1;
char *buf = palloc(UUID_LEN_STR + 1);
void *ptr = buf;
size_t len = UUID_LEN_STR + 1;
uuid_export(uuid, UUID_FMT_STR, &ptr, &len);
@ -79,7 +79,7 @@ uuid_to_string(const uuid_t *uuid)
static void
string_to_uuid(const char *str, uuid_t *uuid)
string_to_uuid(const char *str, uuid_t * uuid)
{
uuid_import(uuid, UUID_FMT_STR, str, UUID_LEN_STR + 1);
}
@ -88,8 +88,8 @@ string_to_uuid(const char *str, uuid_t *uuid)
static Datum
special_uuid_value(const char *name)
{
uuid_t *uuid;
char *str;
uuid_t *uuid;
char *str;
uuid_create(&uuid);
uuid_load(uuid, name);
@ -136,10 +136,10 @@ uuid_ns_x500(PG_FUNCTION_ARGS)
static Datum
uuid_generate_internal(int mode, const uuid_t *ns, const char *name)
uuid_generate_internal(int mode, const uuid_t * ns, const char *name)
{
uuid_t *uuid;
char *str;
uuid_t *uuid;
char *str;
uuid_create(&uuid);
uuid_make(uuid, mode, ns, name);
@ -165,7 +165,7 @@ uuid_generate_v1mc(PG_FUNCTION_ARGS)
static Datum
uuid_generate_v35_internal(int mode, pg_uuid_t *ns, text *name)
uuid_generate_v35_internal(int mode, pg_uuid_t * ns, text *name)
{
uuid_t *ns_uuid;
Datum result;
@ -176,7 +176,7 @@ uuid_generate_v35_internal(int mode, pg_uuid_t *ns, text *name)
result = uuid_generate_internal(mode,
ns_uuid,
DatumGetCString(DirectFunctionCall1(textout, PointerGetDatum(name))));
DatumGetCString(DirectFunctionCall1(textout, PointerGetDatum(name))));
uuid_destroy(ns_uuid);
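
The OSSP uuid calling convention visible above is create/operate/destroy around an opaque uuid_t. A condensed sketch combining special_uuid_value() and uuid_to_string() (the helper name is illustrative):

static char *
named_uuid_string(const char *name)
{
    uuid_t *uuid;
    char   *buf = palloc(UUID_LEN_STR + 1);
    void   *ptr = buf;
    size_t  len = UUID_LEN_STR + 1;

    uuid_create(&uuid);
    uuid_load(uuid, name);      /* predefined names: "nil", "ns:DNS", ... */
    uuid_export(uuid, UUID_FMT_STR, &ptr, &len);
    uuid_destroy(uuid);
    return buf;
}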


@ -28,7 +28,7 @@
* without explicitly invoking the toaster.
*
* This change will break any code that assumes it needn't detoast values
* that have been put into a tuple but never sent to disk. Hopefully there
* that have been put into a tuple but never sent to disk. Hopefully there
* are few such places.
*
* Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since
@ -57,7 +57,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.118 2007/11/07 12:24:23 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.119 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -99,19 +99,19 @@ heap_compute_data_size(TupleDesc tupleDesc,
for (i = 0; i < numberOfAttributes; i++)
{
Datum val;
Datum val;
if (isnull[i])
continue;
val = values[i];
if (ATT_IS_PACKABLE(att[i]) &&
if (ATT_IS_PACKABLE(att[i]) &&
VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
{
/*
* we're anticipating converting to a short varlena header,
* so adjust length and don't count any alignment
* we're anticipating converting to a short varlena header, so
* adjust length and don't count any alignment
*/
data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
}
@ -147,19 +147,19 @@ ComputeDataSize(TupleDesc tupleDesc,
for (i = 0; i < numberOfAttributes; i++)
{
Datum val;
Datum val;
if (nulls[i] != ' ')
continue;
val = values[i];
if (ATT_IS_PACKABLE(att[i]) &&
if (ATT_IS_PACKABLE(att[i]) &&
VARATT_CAN_MAKE_SHORT(DatumGetPointer(val)))
{
/*
* we're anticipating converting to a short varlena header,
* so adjust length and don't count any alignment
* we're anticipating converting to a short varlena header, so
* adjust length and don't count any alignment
*/
data_length += VARATT_CONVERTED_SHORT_SIZE(DatumGetPointer(val));
}
@ -195,6 +195,7 @@ heap_fill_tuple(TupleDesc tupleDesc,
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
#ifdef USE_ASSERT_CHECKING
char *start = data;
#endif
@ -238,8 +239,8 @@ heap_fill_tuple(TupleDesc tupleDesc,
}
/*
* XXX we use the att_align macros on the pointer value itself,
* not on an offset. This is a bit of a hack.
* XXX we use the att_align macros on the pointer value itself, not on
* an offset. This is a bit of a hack.
*/
if (att[i]->attbyval)
@ -327,6 +328,7 @@ DataFill(TupleDesc tupleDesc,
int i;
int numberOfAttributes = tupleDesc->natts;
Form_pg_attribute *att = tupleDesc->attrs;
#ifdef USE_ASSERT_CHECKING
char *start = data;
#endif
@ -370,8 +372,8 @@ DataFill(TupleDesc tupleDesc,
}
/*
* XXX we use the att_align macros on the pointer value itself,
* not on an offset. This is a bit of a hack.
* XXX we use the att_align macros on the pointer value itself, not on
* an offset. This is a bit of a hack.
*/
if (att[i]->attbyval)
@ -611,8 +613,8 @@ nocachegetattr(HeapTuple tuple,
/*
* Otherwise, check for non-fixed-length attrs up to and including
* target. If there aren't any, it's safe to cheaply initialize
* the cached offsets for these attrs.
* target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (HeapTupleHasVarWidth(tuple))
{
@ -673,8 +675,8 @@ nocachegetattr(HeapTuple tuple,
int i;
/*
* Now we know that we have to walk the tuple CAREFULLY. But we
* still might be able to cache some offsets for next time.
* Now we know that we have to walk the tuple CAREFULLY. But we still
* might be able to cache some offsets for next time.
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
@ -683,12 +685,12 @@ nocachegetattr(HeapTuple tuple,
* attcacheoff until we reach either a null or a var-width attribute.
*/
off = 0;
for (i = 0; ; i++) /* loop exit is at "break" */
for (i = 0;; i++) /* loop exit is at "break" */
{
if (HeapTupleHasNulls(tuple) && att_isnull(i, bp))
{
usecache = false;
continue; /* this cannot be the target att */
continue; /* this cannot be the target att */
}
/* If we know the next offset, we can skip the rest */
@ -697,10 +699,10 @@ nocachegetattr(HeapTuple tuple,
else if (att[i]->attlen == -1)
{
/*
* We can only cache the offset for a varlena attribute
* if the offset is already suitably aligned, so that there
* would be no pad bytes in any case: then the offset will
* be valid for either an aligned or unaligned value.
* We can only cache the offset for a varlena attribute if the
* offset is already suitably aligned, so that there would be
* no pad bytes in any case: then the offset will be valid for
* either an aligned or unaligned value.
*/
if (usecache &&
off == att_align_nominal(off, att[i]->attalign))
@ -771,11 +773,12 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
break;
case MinCommandIdAttributeNumber:
case MaxCommandIdAttributeNumber:
/*
* cmin and cmax are now both aliases for the same field,
* which can in fact also be a combo command id. XXX perhaps we
* should return the "real" cmin or cmax if possible, that is
* if we are inside the originating transaction?
* cmin and cmax are now both aliases for the same field, which
* can in fact also be a combo command id. XXX perhaps we should
* return the "real" cmin or cmax if possible, that is if we are
* inside the originating transaction?
*/
result = CommandIdGetDatum(HeapTupleHeaderGetRawCommandId(tup->t_data));
break;
@ -855,7 +858,8 @@ heap_form_tuple(TupleDesc tupleDescriptor,
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
Size len, data_len;
Size len,
data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
@ -965,7 +969,8 @@ heap_formtuple(TupleDesc tupleDescriptor,
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
Size len, data_len;
Size len,
data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
@ -1263,10 +1268,10 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
else if (thisatt->attlen == -1)
{
/*
* We can only cache the offset for a varlena attribute
* if the offset is already suitably aligned, so that there
* would be no pad bytes in any case: then the offset will
* be valid for either an aligned or unaligned value.
* We can only cache the offset for a varlena attribute if the
* offset is already suitably aligned, so that there would be no
* pad bytes in any case: then the offset will be valid for either
* an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@ -1375,10 +1380,10 @@ heap_deformtuple(HeapTuple tuple,
else if (thisatt->attlen == -1)
{
/*
* We can only cache the offset for a varlena attribute
* if the offset is already suitably aligned, so that there
* would be no pad bytes in any case: then the offset will
* be valid for either an aligned or unaligned value.
* We can only cache the offset for a varlena attribute if the
* offset is already suitably aligned, so that there would be no
* pad bytes in any case: then the offset will be valid for either
* an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@ -1484,10 +1489,10 @@ slot_deform_tuple(TupleTableSlot *slot, int natts)
else if (thisatt->attlen == -1)
{
/*
* We can only cache the offset for a varlena attribute
* if the offset is already suitably aligned, so that there
* would be no pad bytes in any case: then the offset will
* be valid for either an aligned or unaligned value.
* We can only cache the offset for a varlena attribute if the
* offset is already suitably aligned, so that there would be no
* pad bytes in any case: then the offset will be valid for either
* an aligned or unaligned value.
*/
if (!slow &&
off == att_align_nominal(off, thisatt->attalign))
@ -1791,7 +1796,8 @@ heap_form_minimal_tuple(TupleDesc tupleDescriptor,
bool *isnull)
{
MinimalTuple tuple; /* return tuple */
Size len, data_len;
Size len,
data_len;
int hoff;
bool hasnull = false;
Form_pg_attribute *att = tupleDescriptor->attrs;
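
A worked example for the short-varlena anticipation in heap_compute_data_size() above: a packable datum of n payload bytes costs n + 1 bytes under the 1-byte header and needs no alignment, versus alignment padding plus 4 + n under the full header. A macro-free sketch of that arithmetic (the real code uses the VARATT_* and att_align macros; 'i' alignment assumed, per the header comment):

#include <stddef.h>

static size_t
short_form_size(size_t n)
{
    return n + 1;               /* 1-byte header, never padded */
}

static size_t
full_form_size(size_t off, size_t n)
{
    size_t  aligned = (off + 3) & ~(size_t) 3;  /* 'i' (int) alignment */

    return (aligned - off) + 4 + n;     /* padding + 4-byte header + data */
}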


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.83 2007/11/07 12:24:24 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.84 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -77,7 +77,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
{
untoasted_values[i] =
PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
DatumGetPointer(values[i])));
DatumGetPointer(values[i])));
untoasted_free[i] = true;
}
@ -309,8 +309,8 @@ nocache_index_getattr(IndexTuple tup,
/*
* Otherwise, check for non-fixed-length attrs up to and including
* target. If there aren't any, it's safe to cheaply initialize
* the cached offsets for these attrs.
* target. If there aren't any, it's safe to cheaply initialize the
* cached offsets for these attrs.
*/
if (IndexTupleHasVarwidths(tup))
{
@ -371,8 +371,8 @@ nocache_index_getattr(IndexTuple tup,
int i;
/*
* Now we know that we have to walk the tuple CAREFULLY. But we
* still might be able to cache some offsets for next time.
* Now we know that we have to walk the tuple CAREFULLY. But we still
* might be able to cache some offsets for next time.
*
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
@ -381,12 +381,12 @@ nocache_index_getattr(IndexTuple tup,
* attcacheoff until we reach either a null or a var-width attribute.
*/
off = 0;
for (i = 0; ; i++) /* loop exit is at "break" */
for (i = 0;; i++) /* loop exit is at "break" */
{
if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
{
usecache = false;
continue; /* this cannot be the target att */
continue; /* this cannot be the target att */
}
/* If we know the next offset, we can skip the rest */
@ -395,10 +395,10 @@ nocache_index_getattr(IndexTuple tup,
else if (att[i]->attlen == -1)
{
/*
* We can only cache the offset for a varlena attribute
* if the offset is already suitably aligned, so that there
* would be no pad bytes in any case: then the offset will
* be valid for either an aligned or unaligned value.
* We can only cache the offset for a varlena attribute if the
* offset is already suitably aligned, so that there would be
* no pad bytes in any case: then the offset will be valid for
* either an aligned or unaligned value.
*/
if (usecache &&
off == att_align_nominal(off, att[i]->attalign))


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.5 2007/06/03 22:16:02 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.6 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -206,8 +206,8 @@ parseRelOptions(Datum options, int numkeywords, const char *const * keywords,
if (values[j] && validate)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("parameter \"%s\" specified more than once",
keywords[j])));
errmsg("parameter \"%s\" specified more than once",
keywords[j])));
value_len = text_len - kw_len - 1;
value = (char *) palloc(value_len + 1);
memcpy(value, text_str + kw_len + 1, value_len);


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.10 2007/08/21 01:11:12 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@ -60,17 +60,18 @@ ginarrayextract(PG_FUNCTION_ARGS)
elmlen, elmbyval, elmalign,
&entries, NULL, (int *) nentries);
if ( *nentries == 0 && PG_NARGS() == 3 )
if (*nentries == 0 && PG_NARGS() == 3)
{
switch( PG_GETARG_UINT16(2) ) /* StrategyNumber */
switch (PG_GETARG_UINT16(2)) /* StrategyNumber */
{
case GinOverlapStrategy:
*nentries = -1; /* nobody can be found */
break;
*nentries = -1; /* nobody can be found */
break;
case GinContainsStrategy:
case GinContainedStrategy:
case GinEqualStrategy:
default: /* require fullscan: GIN can't find void arrays */
default: /* require fullscan: GIN can't find void
* arrays */
break;
}
}


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.9 2007/06/05 12:47:49 teodor Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginbtree.c,v 1.10 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@ -317,8 +317,8 @@ ginInsertValue(GinBtree btree, GinBtreeStack *stack)
Page newlpage;
/*
* newlpage is a pointer to memory page, it doesn't associate
* with buffer, stack->buffer should be untouched
* newlpage is a pointer to memory page, it doesn't associate with
* buffer, stack->buffer should be untouched
*/
newlpage = btree->splitPage(btree, stack->buffer, rbuffer, stack->off, &rdata);


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.7 2007/06/04 15:56:28 teodor Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.8 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@ -358,7 +358,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
static XLogRecData rdata[3];
int sizeofitem = GinSizeOfItem(page);
static ginxlogInsert data;
int cnt=0;
int cnt = 0;
*prdata = rdata;
Assert(GinPageIsData(page));
@ -373,14 +373,14 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
data.isData = TRUE;
data.isLeaf = GinPageIsLeaf(page) ? TRUE : FALSE;
/*
* Prevent full page write if child's split occurs. That is needed
* to remove incomplete splits while replaying WAL
*
* data.updateBlkno contains new block number (of newly created right page)
* for recently splited page.
/*
* Prevent full page write if child's split occurs. That is needed to
* remove incomplete splits while replaying WAL
*
* data.updateBlkno contains new block number (of newly created right
* page) for recently splited page.
*/
if ( data.updateBlkno == InvalidBlockNumber )
if (data.updateBlkno == InvalidBlockNumber)
{
rdata[0].buffer = buf;
rdata[0].buffer_std = FALSE;
@ -393,7 +393,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
rdata[cnt].buffer = InvalidBuffer;
rdata[cnt].data = (char *) &data;
rdata[cnt].len = sizeof(ginxlogInsert);
rdata[cnt].next = &rdata[cnt+1];
rdata[cnt].next = &rdata[cnt + 1];
cnt++;
rdata[cnt].buffer = InvalidBuffer;


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.10 2007/10/29 13:49:21 teodor Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@ -354,7 +354,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
static XLogRecData rdata[3];
OffsetNumber placed;
static ginxlogInsert data;
int cnt=0;
int cnt = 0;
*prdata = rdata;
data.updateBlkno = entryPreparePage(btree, page, off);
@ -372,14 +372,14 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
data.isData = false;
data.isLeaf = GinPageIsLeaf(page) ? TRUE : FALSE;
/*
* Prevent full page write if child's split occurs. That is needed
* to remove incomplete splits while replaying WAL
/*
* Prevent full page write if child's split occurs. That is needed to
* remove incomplete splits while replaying WAL
*
* data.updateBlkno contains new block number (of newly created right page)
* for recently splited page.
* data.updateBlkno contains new block number (of newly created right
* page) for recently splited page.
*/
if ( data.updateBlkno == InvalidBlockNumber )
if (data.updateBlkno == InvalidBlockNumber)
{
rdata[0].buffer = buf;
rdata[0].buffer_std = TRUE;
@ -392,7 +392,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
rdata[cnt].buffer = InvalidBuffer;
rdata[cnt].data = (char *) &data;
rdata[cnt].len = sizeof(ginxlogInsert);
rdata[cnt].next = &rdata[cnt+1];
rdata[cnt].next = &rdata[cnt + 1];
cnt++;
rdata[cnt].buffer = InvalidBuffer;


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.8 2007/06/04 15:56:28 teodor Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.9 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@ -23,29 +23,29 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
OffsetNumber maxoff = GinPageGetOpaque(page)->maxoff;
int res;
if ( GinPageGetOpaque(page)->flags & GIN_DELETED )
if (GinPageGetOpaque(page)->flags & GIN_DELETED)
/* page was deleted by concurrent vacuum */
return false;
if ( *off > maxoff || *off == InvalidOffsetNumber )
if (*off > maxoff || *off == InvalidOffsetNumber)
res = -1;
else
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
if ( res == 0 )
if (res == 0)
{
/* page isn't changed */
return true;
}
else if ( res > 0 )
return true;
}
else if (res > 0)
{
/*
* some items was added before our position, look further to find
* it or first greater
/*
* some items was added before our position, look further to find it
* or first greater
*/
(*off)++;
for (; *off <= maxoff; (*off)++)
for (; *off <= maxoff; (*off)++)
{
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
@ -53,7 +53,7 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
return true;
if (res < 0)
{
{
(*off)--;
return true;
}
@ -61,20 +61,20 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
}
else
{
/*
* some items was deleted before our position, look from begining
* to find it or first greater
/*
* some items was deleted before our position, look from begining to
* find it or first greater
*/
for(*off = FirstOffsetNumber; *off<= maxoff; (*off)++)
for (*off = FirstOffsetNumber; *off <= maxoff; (*off)++)
{
res = compareItemPointers(item, (ItemPointer) GinDataPageGetItem(page, *off));
if ( res == 0 )
if (res == 0)
return true;
if (res < 0)
{
{
(*off)--;
return true;
}
@ -174,7 +174,7 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry, bool firs
page = BufferGetPage(entry->buffer);
/* try to find curItem in current buffer */
if ( findItemInPage(page, &entry->curItem, &entry->offset) )
if (findItemInPage(page, &entry->curItem, &entry->offset))
return;
/* walk to right */
@ -186,13 +186,13 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry, bool firs
page = BufferGetPage(entry->buffer);
entry->offset = InvalidOffsetNumber;
if ( findItemInPage(page, &entry->curItem, &entry->offset) )
if (findItemInPage(page, &entry->curItem, &entry->offset))
return;
}
/*
* curItem and any greated items was deleted by concurrent vacuum,
* so we finished scan with currrent entry
* curItem and any greated items was deleted by concurrent vacuum, so
* we finished scan with currrent entry
*/
}
}
@ -221,10 +221,10 @@ startScanKey(Relation index, GinState *ginstate, GinScanKey key)
if (GinFuzzySearchLimit > 0)
{
/*
* If all of keys more than threshold we will try to reduce result,
* we hope (and only hope, for intersection operation of array our
* supposition isn't true), that total result will not more than
* minimal predictNumberResult.
* If all of keys more than threshold we will try to reduce
* result, we hope (and only hope, for intersection operation of
* array our supposition isn't true), that total result will not
* more than minimal predictNumberResult.
*/
for (i = 0; i < key->nentries; i++)
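
A condensed model of the findItemInPage() logic above, with item pointers abstracted to sorted integers: if the remembered offset still holds the item the page is unchanged; if the item now compares greater, entries were added before our position, so scan forward; otherwise entries were deleted, so rescan from the front. Illustrative only:

#include <stdbool.h>

static bool
refind_item(const int *items, int maxoff, int item, int *off)
{
    int     res;
    int     i;

    if (*off < 1 || *off > maxoff)
        res = -1;                       /* stale offset: restart */
    else if (item == items[*off - 1])
        return true;                    /* page unchanged */
    else
        res = (item > items[*off - 1]) ? 1 : -1;

    /* forward after insertions, from the front after deletions */
    for (i = (res > 0) ? *off + 1 : 1; i <= maxoff; i++)
    {
        if (item == items[i - 1])
        {
            *off = i;
            return true;
        }
        if (item < items[i - 1])
        {
            *off = i - 1;               /* just before the first greater */
            return true;
        }
    }
    return false;                       /* gone; caller walks right */
}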


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.10 2007/05/27 03:50:38 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@ -164,13 +164,13 @@ newScanKey(IndexScanDesc scan)
UInt16GetDatum(scankey[i].sk_strategy)
)
);
if ( nEntryValues < 0 )
if (nEntryValues < 0)
{
/*
* extractQueryFn signals that nothing will be found,
* so we can just set isVoidRes flag...
* extractQueryFn signals that nothing will be found, so we can
* just set isVoidRes flag...
*/
so->isVoidRes = true;
so->isVoidRes = true;
break;
}
if (entryValues == NULL || nEntryValues == 0)
@ -187,7 +187,7 @@ newScanKey(IndexScanDesc scan)
if (so->nkeys == 0 && !so->isVoidRes)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("GIN index does not support search with void query")));
errmsg("GIN index does not support search with void query")));
pgstat_count_index_scan(scan->indexRelation);
}


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.10 2007/01/31 15:09:45 teodor Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@ -126,17 +126,17 @@ compareEntries(GinState *ginstate, Datum a, Datum b)
&ginstate->compareFn,
a, b
)
);
);
}
typedef struct
{
FmgrInfo *cmpDatumFunc;
bool *needUnique;
} cmpEntriesData;
} cmpEntriesData;
static int
cmpEntries(const Datum *a, const Datum *b, cmpEntriesData *arg)
cmpEntries(const Datum *a, const Datum *b, cmpEntriesData * arg)
{
int res = DatumGetInt32(FunctionCall2(arg->cmpDatumFunc,
*a, *b));


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.17 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.18 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
@ -28,7 +28,7 @@ typedef struct
IndexBulkDeleteCallback callback;
void *callback_state;
GinState ginstate;
BufferAccessStrategy strategy;
BufferAccessStrategy strategy;
} GinVacuumState;
@ -160,14 +160,14 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot,
/*
* We should be sure that we don't concurrent with inserts, insert process
* never release root page until end (but it can unlock it and lock
* again). New scan can't start but previously started
* ones work concurrently.
* again). New scan can't start but previously started ones work
* concurrently.
*/
if ( isRoot )
if (isRoot)
LockBufferForCleanup(buffer);
else
LockBuffer(buffer, GIN_EXCLUSIVE);
LockBuffer(buffer, GIN_EXCLUSIVE);
Assert(GinPageIsData(page));
@ -240,8 +240,8 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
Buffer dBuffer = ReadBufferWithStrategy(gvs->index, deleteBlkno, gvs->strategy);
Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ?
InvalidBuffer : ReadBufferWithStrategy(gvs->index, leftBlkno, gvs->strategy);
Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ?
InvalidBuffer : ReadBufferWithStrategy(gvs->index, leftBlkno, gvs->strategy);
Buffer pBuffer = ReadBufferWithStrategy(gvs->index, parentBlkno, gvs->strategy);
Page page,
parentPage;
@ -268,17 +268,20 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn
parentPage = BufferGetPage(pBuffer);
#ifdef USE_ASSERT_CHECKING
do {
PostingItem *tod=(PostingItem *) GinDataPageGetItem(parentPage, myoff);
Assert( PostingItemGetBlockNumber(tod) == deleteBlkno );
} while(0);
do
{
PostingItem *tod = (PostingItem *) GinDataPageGetItem(parentPage, myoff);
Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
} while (0);
#endif
PageDeletePostingItem(parentPage, myoff);
page = BufferGetPage(dBuffer);
/*
* we shouldn't change rightlink field to save
* workability of running search scan
* we shouldn't change rightlink field to save workability of running
* search scan
*/
GinPageGetOpaque(page)->flags = GIN_DELETED;
@ -363,8 +366,8 @@ typedef struct DataPageDeleteStack
struct DataPageDeleteStack *child;
struct DataPageDeleteStack *parent;
BlockNumber blkno; /* current block number */
BlockNumber leftBlkno; /* rightest non-deleted page on left */
BlockNumber blkno; /* current block number */
BlockNumber leftBlkno; /* rightest non-deleted page on left */
bool isRoot;
} DataPageDeleteStack;


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.10 2007/10/29 19:26:57 teodor Exp $
* $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.11 2007/11/15 21:14:31 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@ -135,7 +135,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
Assert(data->isDelete == FALSE);
Assert(GinPageIsData(page));
if ( ! XLByteLE(lsn, PageGetLSN(page)) )
if (!XLByteLE(lsn, PageGetLSN(page)))
{
if (data->isLeaf)
{
@ -170,6 +170,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
{
PostingItem *pitem = (PostingItem *) (XLogRecGetData(record) + sizeof(ginxlogInsert));
forgetIncompleteSplit(data->node, PostingItemGetBlockNumber(pitem), data->updateBlkno);
}
@ -180,7 +181,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
Assert(!GinPageIsData(page));
if ( ! XLByteLE(lsn, PageGetLSN(page)) )
if (!XLByteLE(lsn, PageGetLSN(page)))
{
if (data->updateBlkno != InvalidBlockNumber)
{
@ -202,7 +203,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), data->offset, false, false) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in %u/%u/%u",
data->node.spcNode, data->node.dbNode, data->node.relNode);
data->node.spcNode, data->node.dbNode, data->node.relNode);
}
if (!data->isLeaf && data->updateBlkno != InvalidBlockNumber)
@ -212,7 +213,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
}
}
if ( ! XLByteLE(lsn, PageGetLSN(page)) )
if (!XLByteLE(lsn, PageGetLSN(page)))
{
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.147 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.148 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -360,8 +360,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate)
ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
/*
* fill page, we can do it because all these pages are new
* (ie not linked in tree or masked by temp page
* fill page, we can do it because all these pages are new (ie not
* linked in tree or masked by temp page
*/
data = (char *) (ptr->list);
for (i = 0; i < ptr->block.num; i++)


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.67 2007/09/12 22:10:25 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.68 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -383,13 +383,12 @@ gistindex_keytest(IndexTuple tuple,
/*
* On non-leaf page we can't conclude that child hasn't NULL
* values because of assumption in GiST: uinon (VAL, NULL) is VAL
* But if on non-leaf page key IS NULL then all childs
* has NULL.
* But if on non-leaf page key IS NULL then all childs has NULL.
*/
Assert( key->sk_flags & SK_SEARCHNULL );
Assert(key->sk_flags & SK_SEARCHNULL);
if ( GistPageIsLeaf(p) && !isNull )
if (GistPageIsLeaf(p) && !isNull)
return false;
}
else if (isNull)
@ -404,12 +403,14 @@ gistindex_keytest(IndexTuple tuple,
FALSE, isNull);
/*
* Call the Consistent function to evaluate the test. The arguments
* are the index datum (as a GISTENTRY*), the comparison datum, and
* the comparison operator's strategy number and subtype from pg_amop.
* Call the Consistent function to evaluate the test. The
* arguments are the index datum (as a GISTENTRY*), the comparison
* datum, and the comparison operator's strategy number and
* subtype from pg_amop.
*
* (Presently there's no need to pass the subtype since it'll always
* be zero, but might as well pass it for possible future use.)
* (Presently there's no need to pass the subtype since it'll
* always be zero, but might as well pass it for possible future
* use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),


@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.11 2007/09/07 17:04:26 teodor Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.12 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -394,20 +394,22 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
ADDLIST(listT, unionT, posT, i);
}
#define LIMIT_RATIO 0.1
#define LIMIT_RATIO 0.1
#define _IS_BADRATIO(x,y) ( (y) == 0 || (float)(x)/(float)(y) < LIMIT_RATIO )
#define IS_BADRATIO(x,y) ( _IS_BADRATIO((x),(y)) || _IS_BADRATIO((y),(x)) )
/* bad disposition, try to split by centers of boxes */
if ( IS_BADRATIO(posR, posL) && IS_BADRATIO(posT, posB) )
if (IS_BADRATIO(posR, posL) && IS_BADRATIO(posT, posB))
{
double avgCenterX=0.0, avgCenterY=0.0;
double CenterX, CenterY;
double avgCenterX = 0.0,
avgCenterY = 0.0;
double CenterX,
CenterY;
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
cur = DatumGetBoxP(entryvec->vector[i].key);
avgCenterX += ((double)cur->high.x + (double)cur->low.x)/2.0;
avgCenterY += ((double)cur->high.y + (double)cur->low.y)/2.0;
avgCenterX += ((double) cur->high.x + (double) cur->low.x) / 2.0;
avgCenterY += ((double) cur->high.y + (double) cur->low.y) / 2.0;
}
avgCenterX /= maxoff;
@ -417,11 +419,11 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
cur = DatumGetBoxP(entryvec->vector[i].key);
CenterX = ((double)cur->high.x + (double)cur->low.x)/2.0;
CenterY = ((double)cur->high.y + (double)cur->low.y)/2.0;
if (CenterX < avgCenterX)
CenterX = ((double) cur->high.x + (double) cur->low.x) / 2.0;
CenterY = ((double) cur->high.y + (double) cur->low.y) / 2.0;
if (CenterX < avgCenterX)
ADDLIST(listL, unionL, posL, i);
else if (CenterX == avgCenterX)
{
@ -442,7 +444,7 @@ gist_box_picksplit(PG_FUNCTION_ARGS)
else
ADDLIST(listB, unionB, posB, i);
}
else
else
ADDLIST(listT, unionT, posT, i);
}
}
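
The LIMIT_RATIO guard above declares a split bad when either side would get less than 10% of the items (or none at all), in which case gist_box_picksplit() retries by comparing each box's center against the average center. The macro pair in function form (illustrative):

static int
is_bad_ratio(int x, int y)
{
    return x == 0 || y == 0 ||
           (float) x / (float) y < 0.1f ||
           (float) y / (float) x < 0.1f;
}

/* retry by centers when both axes are lopsided:
 *     if (is_bad_ratio(posR, posL) && is_bad_ratio(posT, posB)) ...
 */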


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.32 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.33 2007/11/15 21:14:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -35,7 +35,7 @@ typedef struct
Relation index;
MemoryContext opCtx;
GistBulkDeleteResult *result;
BufferAccessStrategy strategy;
BufferAccessStrategy strategy;
} GistVacuum;
typedef struct

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.96 2007/09/12 22:10:25 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.97 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -548,7 +548,7 @@ loop_top:
vacuum_delay_point();
buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
info->strategy);
page = BufferGetPage(buf);
opaque = (HashPageOpaque) PageGetSpecialPointer(page);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.53 2007/09/21 22:52:52 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.54 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -103,8 +103,8 @@ hashfloat4(PG_FUNCTION_ARGS)
* To support cross-type hashing of float8 and float4, we want to return
* the same hash value hashfloat8 would produce for an equal float8 value.
* So, widen the value to float8 and hash that. (We must do this rather
* than have hashfloat8 try to narrow its value to float4; that could
* fail on overflow.)
* than have hashfloat8 try to narrow its value to float4; that could fail
* on overflow.)
*/
key8 = key;
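
The rewrapped comment above is about cross-type hash compatibility: widen the float4 and hash its float8 representation, rather than narrow the float8. A self-contained sketch, assuming a toy FNV-1a byte hash standing in for hash_any(); special cases such as zero handling are omitted:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy byte hash (FNV-1a); a stand-in for hash_any(). */
static uint32_t
hash_bytes(const void *p, size_t len)
{
    const unsigned char *c = p;
    uint32_t    h = 2166136261u;

    while (len-- > 0)
        h = (h ^ *c++) * 16777619u;
    return h;
}

static uint32_t
hash_float8(double key)
{
    return hash_bytes(&key, sizeof(key));
}

static uint32_t
hash_float4(float key)
{
    double      key8 = key;    /* widen, exactly as the comment describes */

    return hash_bytes(&key8, sizeof(key8));
}

int
main(void)
{
    /* Equal values hash equal across the two types because the float4
     * was widened; narrowing the float8 instead could overflow. */
    printf("%d\n", hash_float4(3.5f) == hash_float8(3.5));    /* 1 */
    return 0;
}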


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.60 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.61 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@ -156,7 +156,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
/*
* _hash_getovflpage()
*
* Find an available overflow page and return it. The returned buffer
* Find an available overflow page and return it. The returned buffer
* is pinned and write-locked, and has had _hash_pageinit() applied,
* but it is caller's responsibility to fill the special space.
*
@ -402,9 +402,9 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
bucket = ovflopaque->hasho_bucket;
/*
* Zero the page for debugging's sake; then write and release it.
* (Note: if we failed to zero the page here, we'd have problems
* with the Assert in _hash_pageinit() when the page is reused.)
* Zero the page for debugging's sake; then write and release it. (Note:
* if we failed to zero the page here, we'd have problems with the Assert
* in _hash_pageinit() when the page is reused.)
*/
MemSet(ovflpage, 0, BufferGetPageSize(ovflbuf));
_hash_wrtbuf(rel, ovflbuf);
@ -420,7 +420,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
Buffer prevbuf = _hash_getbuf_with_strategy(rel,
prevblkno,
HASH_WRITE,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
bstrategy);
Page prevpage = BufferGetPage(prevbuf);
HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.70 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.71 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@ -37,7 +37,7 @@
static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
uint32 nblocks);
uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
Bucket obucket, Bucket nbucket,
BlockNumber start_oblkno,
@ -138,7 +138,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
*
* This must be used only to fetch pages that are known to be before
* the index's filesystem EOF, but are to be filled from scratch.
* _hash_pageinit() is applied automatically. Otherwise it has
* _hash_pageinit() is applied automatically. Otherwise it has
* effects similar to _hash_getbuf() with access = HASH_WRITE.
*
* When this routine returns, a write lock is set on the
@ -184,7 +184,7 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno)
{
BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
Buffer buf;
if (blkno == P_NEW)
@ -354,10 +354,10 @@ _hash_metapinit(Relation rel)
ffactor = 10;
/*
* We initialize the metapage, the first two bucket pages, and the
* first bitmap page in sequence, using _hash_getnewbuf to cause
* smgrextend() calls to occur. This ensures that the smgr level
* has the right idea of the physical index length.
* We initialize the metapage, the first two bucket pages, and the first
* bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
* calls to occur. This ensures that the smgr level has the right idea of
* the physical index length.
*/
metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
pg = BufferGetPage(metabuf);
@ -501,15 +501,16 @@ _hash_expandtable(Relation rel, Buffer metabuf)
goto fail;
/*
* Can't split anymore if maxbucket has reached its maximum possible value.
* Can't split anymore if maxbucket has reached its maximum possible
* value.
*
* Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
* the calculation maxbucket+1 mustn't overflow). Currently we restrict
* to half that because of overflow looping in _hash_log2() and
* insufficient space in hashm_spares[]. It's moot anyway because an
* index with 2^32 buckets would certainly overflow BlockNumber and
* hence _hash_alloc_buckets() would fail, but if we supported buckets
* smaller than a disk block then this would be an independent constraint.
* index with 2^32 buckets would certainly overflow BlockNumber and hence
* _hash_alloc_buckets() would fail, but if we supported buckets smaller
* than a disk block then this would be an independent constraint.
*/
if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
goto fail;
@ -536,10 +537,10 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Likewise lock the new bucket (should never fail).
*
* Note: it is safe to compute the new bucket's blkno here, even though
* we may still need to update the BUCKET_TO_BLKNO mapping. This is
* because the current value of hashm_spares[hashm_ovflpoint] correctly
* shows where we are going to put a new splitpoint's worth of buckets.
* Note: it is safe to compute the new bucket's blkno here, even though we
* may still need to update the BUCKET_TO_BLKNO mapping. This is because
* the current value of hashm_spares[hashm_ovflpoint] correctly shows
* where we are going to put a new splitpoint's worth of buckets.
*/
start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);
@ -557,11 +558,12 @@ _hash_expandtable(Relation rel, Buffer metabuf)
if (spare_ndx > metap->hashm_ovflpoint)
{
Assert(spare_ndx == metap->hashm_ovflpoint + 1);
/*
* The number of buckets in the new splitpoint is equal to the
* total number already in existence, i.e. new_bucket. Currently
* this maps one-to-one to blocks required, but someday we may need
* a more complicated calculation here.
* The number of buckets in the new splitpoint is equal to the total
* number already in existence, i.e. new_bucket. Currently this maps
* one-to-one to blocks required, but someday we may need a more
* complicated calculation here.
*/
if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
{
@ -673,14 +675,14 @@ fail:
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
BlockNumber lastblock;
BlockNumber lastblock;
char zerobuf[BLCKSZ];
lastblock = firstblock + nblocks - 1;
/*
* Check for overflow in block number calculation; if so, we cannot
* extend the index anymore.
* Check for overflow in block number calculation; if so, we cannot extend
* the index anymore.
*/
if (lastblock < firstblock || lastblock == InvalidBlockNumber)
return false;
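
The guard above is the standard unsigned-wraparound overflow test. A self-contained sketch, with a local typedef standing in for BlockNumber:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;   /* Postgres block numbers are 32-bit */
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

/* Returns false if firstblock + nblocks - 1 cannot be represented. */
static bool
blkno_range_ok(BlockNumber firstblock, uint32_t nblocks)
{
    BlockNumber lastblock = firstblock + nblocks - 1;

    /* Unsigned addition wraps on overflow, so a wrapped result is
     * strictly smaller than the starting block. */
    return !(lastblock < firstblock || lastblock == InvalidBlockNumber);
}

int
main(void)
{
    printf("%d\n", blkno_range_ok(100, 50));             /* 1: fits */
    printf("%d\n", blkno_range_ok(0xFFFFFFF0, 0x100));   /* 0: wraps */
    return 0;
}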


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.244 2007/11/07 12:24:24 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.245 2007/11/15 21:14:32 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -60,9 +60,9 @@
static HeapScanDesc heap_beginscan_internal(Relation relation,
Snapshot snapshot,
int nkeys, ScanKey key,
bool is_bitmapscan);
Snapshot snapshot,
int nkeys, ScanKey key,
bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
@ -85,18 +85,18 @@ initscan(HeapScanDesc scan, ScanKey key)
* Determine the number of blocks we have to scan.
*
* It is sufficient to do this once at scan start, since any tuples added
* while the scan is in progress will be invisible to my snapshot
* anyway. (That is not true when using a non-MVCC snapshot. However,
* we couldn't guarantee to return tuples added after scan start anyway,
* since they might go into pages we already scanned. To guarantee
* consistent results for a non-MVCC snapshot, the caller must hold some
* higher-level lock that ensures the interesting tuple(s) won't change.)
* while the scan is in progress will be invisible to my snapshot anyway.
* (That is not true when using a non-MVCC snapshot. However, we couldn't
* guarantee to return tuples added after scan start anyway, since they
* might go into pages we already scanned. To guarantee consistent
* results for a non-MVCC snapshot, the caller must hold some higher-level
* lock that ensures the interesting tuple(s) won't change.)
*/
scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
/*
* If the table is large relative to NBuffers, use a bulk-read access
* strategy and enable synchronized scanning (see syncscan.c). Although
* strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
*
@ -140,8 +140,8 @@ initscan(HeapScanDesc scan, ScanKey key)
memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));
/*
* Currently, we don't have a stats counter for bitmap heap scans
* (but the underlying bitmap index scans will be counted).
* Currently, we don't have a stats counter for bitmap heap scans (but the
* underlying bitmap index scans will be counted).
*/
if (!scan->rs_bitmapscan)
pgstat_count_heap_scan(scan->rs_rd);
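
The comment in this hunk ties bulk-read buffering and synchronized scanning to a single size threshold so there are only two behaviors to tune. A sketch, assuming a table qualifies when it exceeds a quarter of shared buffers (the fraction is an assumption for illustration, not taken from the hunk):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical setting; NBuffers is the shared-buffer count. */
static const int NBuffers = 16384;

struct scan_opts
{
    bool        bulk_read;  /* ring buffer instead of normal eviction */
    bool        sync_scan;  /* report/join the shared scan position */
};

/* One threshold drives both features: two behaviors, not four. */
static struct scan_opts
choose_scan_opts(unsigned nblocks)
{
    bool        large = nblocks > (unsigned) NBuffers / 4;  /* assumed ratio */

    return (struct scan_opts) {.bulk_read = large, .sync_scan = large};
}

int
main(void)
{
    struct scan_opts o = choose_scan_opts(100000);

    printf("bulk=%d sync=%d\n", o.bulk_read, o.sync_scan);  /* bulk=1 sync=1 */
    return 0;
}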
@ -283,7 +283,7 @@ heapgettup(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
page = scan->rs_startblock; /* first page */
page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
lineoff = FirstOffsetNumber; /* first offnum */
scan->rs_inited = true;
@ -317,6 +317,7 @@ heapgettup(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
/*
* Disable reporting to syncscan logic in a backwards scan; it's
* not very likely anyone else is doing the same thing at the same
@ -459,9 +460,9 @@ heapgettup(HeapScanDesc scan,
finished = (page == scan->rs_startblock);
/*
* Report our new scan position for synchronization purposes.
* We don't do that when moving backwards, however. That would
* just mess up any other forward-moving scanners.
* Report our new scan position for synchronization purposes. We
* don't do that when moving backwards, however. That would just
* mess up any other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the
* final state of the position hint is back at the start of the
@ -554,7 +555,7 @@ heapgettup_pagemode(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
page = scan->rs_startblock; /* first page */
page = scan->rs_startblock; /* first page */
heapgetpage(scan, page);
lineindex = 0;
scan->rs_inited = true;
@ -585,6 +586,7 @@ heapgettup_pagemode(HeapScanDesc scan,
tuple->t_data = NULL;
return;
}
/*
* Disable reporting to syncscan logic in a backwards scan; it's
* not very likely anyone else is doing the same thing at the same
@ -719,9 +721,9 @@ heapgettup_pagemode(HeapScanDesc scan,
finished = (page == scan->rs_startblock);
/*
* Report our new scan position for synchronization purposes.
* We don't do that when moving backwards, however. That would
* just mess up any other forward-moving scanners.
* Report our new scan position for synchronization purposes. We
* don't do that when moving backwards, however. That would just
* mess up any other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the
* final state of the position hint is back at the start of the
@ -1057,7 +1059,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
* heap_beginscan - begin relation scan
*
* heap_beginscan_bm is an alternative entry point for setting up a HeapScanDesc
* for a bitmap heap scan. Although that scan technology is really quite
* for a bitmap heap scan. Although that scan technology is really quite
* unlike a standard seqscan, there is just enough commonality to make it
* worth using the same data structure.
* ----------------
@ -1423,10 +1425,10 @@ bool
heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
bool *all_dead)
{
Page dp = (Page) BufferGetPage(buffer);
Page dp = (Page) BufferGetPage(buffer);
TransactionId prev_xmax = InvalidTransactionId;
OffsetNumber offnum;
bool at_chain_start;
bool at_chain_start;
if (all_dead)
*all_dead = true;
@ -1438,7 +1440,7 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
/* Scan through possible multiple members of HOT-chain */
for (;;)
{
ItemId lp;
ItemId lp;
HeapTupleData heapTuple;
/* check for bogus TID */
@ -1472,7 +1474,8 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
break;
/*
* The xmin should match the previous xmax value, else chain is broken.
* The xmin should match the previous xmax value, else chain is
* broken.
*/
if (TransactionIdIsValid(prev_xmax) &&
!TransactionIdEquals(prev_xmax,
@ -1499,8 +1502,8 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
*all_dead = false;
/*
* Check to see if HOT chain continues past this tuple; if so
* fetch the next offnum and loop around.
* Check to see if HOT chain continues past this tuple; if so fetch
* the next offnum and loop around.
*/
if (HeapTupleIsHotUpdated(&heapTuple))
{
@ -1511,7 +1514,7 @@ heap_hot_search_buffer(ItemPointer tid, Buffer buffer, Snapshot snapshot,
prev_xmax = HeapTupleHeaderGetXmax(heapTuple.t_data);
}
else
break; /* end of chain */
break; /* end of chain */
}
return false;
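
The chain-validity test rewrapped above (the previous tuple's xmax must equal the next tuple's xmin) is the crux of walking a HOT chain safely. A toy walk over an in-memory "page", with hypothetical, heavily simplified tuples:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;
#define InvalidTransactionId 0

/* Hypothetical tuple carrying only the chain-relevant fields. */
struct tup
{
    TransactionId xmin;
    TransactionId xmax;
    int         next;       /* index of the next chain member, -1 at end */
};

/* Walk a HOT chain starting at 'off', stopping if the xmin/xmax linkage
 * is broken (e.g. the successor slot was recycled by a later insert). */
static void
walk_chain(struct tup *page, int off)
{
    TransactionId prev_xmax = InvalidTransactionId;

    while (off >= 0)
    {
        struct tup *t = &page[off];

        if (prev_xmax != InvalidTransactionId && prev_xmax != t->xmin)
        {
            printf("chain broken at slot %d\n", off);
            return;
        }
        printf("visit slot %d (xmin=%u)\n", off, t->xmin);
        prev_xmax = t->xmax;
        off = t->next;
    }
}

int
main(void)
{
    struct tup  page[] = {
        {100, 200, 1},      /* updated by xact 200 ... */
        {200, 300, 2},      /* ... which matches this xmin: ok */
        {999, 0, -1},       /* recycled slot: xmin doesn't match 300 */
    };

    walk_chain(page, 0);
    return 0;
}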
@ -1528,8 +1531,8 @@ bool
heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
bool *all_dead)
{
bool result;
Buffer buffer;
bool result;
Buffer buffer;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@ -1665,7 +1668,7 @@ heap_get_latest_tid(Relation relation,
*
* This is called after we have waited for the XMAX transaction to terminate.
* If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
* be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* be set on exit. If the transaction committed, we set the XMAX_COMMITTED
* hint bit if possible --- but beware that that may not yet be possible,
* if the transaction committed asynchronously. Hence callers should look
* only at XMAX_INVALID.
@ -2069,7 +2072,7 @@ l1:
/*
* If this transaction commits, the tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
* falls below the OldestXmin horizon. If the transaction finally aborts,
* falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*/
@ -2252,15 +2255,15 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* Fetch the list of attributes to be checked for HOT update. This is
* wasted effort if we fail to update or have to put the new tuple on
* a different page. But we must compute the list before obtaining
* buffer lock --- in the worst case, if we are doing an update on one
* of the relevant system catalogs, we could deadlock if we try to
* fetch the list later. In any case, the relcache caches the data
* so this is usually pretty cheap.
* wasted effort if we fail to update or have to put the new tuple on a
* different page. But we must compute the list before obtaining buffer
* lock --- in the worst case, if we are doing an update on one of the
* relevant system catalogs, we could deadlock if we try to fetch the list
* later. In any case, the relcache caches the data so this is usually
* pretty cheap.
*
* Note that we get a copy here, so we need not worry about relcache
* flush happening midway through.
* Note that we get a copy here, so we need not worry about relcache flush
* happening midway through.
*/
hot_attrs = RelationGetIndexAttrBitmap(relation);
@ -2555,7 +2558,7 @@ l2:
{
/*
* Since the new tuple is going into the same page, we might be able
* to do a HOT update. Check if any of the index columns have been
* to do a HOT update. Check if any of the index columns have been
* changed. If not, then HOT update is possible.
*/
if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
@ -2573,14 +2576,14 @@ l2:
/*
* If this transaction commits, the old tuple will become DEAD sooner or
* later. Set flag that this page is a candidate for pruning once our xid
* falls below the OldestXmin horizon. If the transaction finally aborts,
* falls below the OldestXmin horizon. If the transaction finally aborts,
* the subsequent page pruning will be a no-op and the hint will be
* cleared.
*
* XXX Should we set hint on newbuf as well? If the transaction
* aborts, there would be a prunable tuple in the newbuf; but for now
* we choose not to optimize for aborts. Note that heap_xlog_update
* must be kept in sync if this decision changes.
* XXX Should we set hint on newbuf as well? If the transaction aborts,
* there would be a prunable tuple in the newbuf; but for now we choose
* not to optimize for aborts. Note that heap_xlog_update must be kept in
* sync if this decision changes.
*/
PageSetPrunable(dp, xid);
@ -2695,22 +2698,24 @@ static bool
heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
HeapTuple tup1, HeapTuple tup2)
{
Datum value1, value2;
bool isnull1, isnull2;
Datum value1,
value2;
bool isnull1,
isnull2;
Form_pg_attribute att;
/*
* If it's a whole-tuple reference, say "not equal". It's not really
* worth supporting this case, since it could only succeed after a
* no-op update, which is hardly a case worth optimizing for.
* worth supporting this case, since it could only succeed after a no-op
* update, which is hardly a case worth optimizing for.
*/
if (attrnum == 0)
return false;
/*
* Likewise, automatically say "not equal" for any system attribute
* other than OID and tableOID; we cannot expect these to be consistent
* in a HOT chain, or even to be set correctly yet in the new tuple.
* Likewise, automatically say "not equal" for any system attribute other
* than OID and tableOID; we cannot expect these to be consistent in a HOT
* chain, or even to be set correctly yet in the new tuple.
*/
if (attrnum < 0)
{
@ -2720,17 +2725,17 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
}
/*
* Extract the corresponding values. XXX this is pretty inefficient
* if there are many indexed columns. Should HeapSatisfiesHOTUpdate
* do a single heap_deform_tuple call on each tuple, instead? But
* that doesn't work for system columns ...
* Extract the corresponding values. XXX this is pretty inefficient if
* there are many indexed columns. Should HeapSatisfiesHOTUpdate do a
* single heap_deform_tuple call on each tuple, instead? But that doesn't
* work for system columns ...
*/
value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
/*
* If one value is NULL and other is not, then they are certainly
* not equal
* If one value is NULL and other is not, then they are certainly not
* equal
*/
if (isnull1 != isnull2)
return false;
@ -2744,7 +2749,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* We do simple binary comparison of the two datums. This may be overly
* strict because there can be multiple binary representations for the
* same logical value. But we should be OK as long as there are no false
* same logical value. But we should be OK as long as there are no false
* positives. Using a type-specific equality operator is messy because
* there could be multiple notions of equality in different operator
* classes; furthermore, we cannot safely invoke user-defined functions
@ -2758,7 +2763,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
else
{
Assert(attrnum <= tupdesc->natts);
att = tupdesc->attrs[attrnum - 1];
att = tupdesc->attrs[attrnum - 1];
return datumIsEqual(value1, value2, att->attbyval, att->attlen);
}
}
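
The comment above argues for strict binary comparison of datums: false negatives are acceptable, false positives are not. A sketch of such a comparison for by-value and by-reference attributes (varlena and toast handling omitted, and the Datum typedef is a local stand-in):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uintptr_t Datum;        /* stand-in for Postgres' Datum */

/* Binary equality: for by-value types the Datum itself is the value;
 * for by-reference types it points at 'len' bytes to compare. */
static bool
datum_binary_equal(Datum a, Datum b, bool byval, size_t len)
{
    if (byval)
        return a == b;
    return memcmp((const void *) a, (const void *) b, len) == 0;
}

int
main(void)
{
    int32_t     x = 42;
    const char  s1[4] = "abc";
    const char  s2[4] = "abc";

    printf("%d\n", datum_binary_equal((Datum) x, (Datum) 42, true, 0));   /* 1 */
    printf("%d\n", datum_binary_equal((Datum) s1, (Datum) s2, false, 4)); /* 1 */
    return 0;
}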
@ -2779,7 +2784,7 @@ static bool
HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
HeapTuple oldtup, HeapTuple newtup)
{
int attrnum;
int attrnum;
while ((attrnum = bms_first_member(hot_attrs)) >= 0)
{
@ -3094,15 +3099,15 @@ l3:
}
/*
* We might already hold the desired lock (or stronger), possibly under
* a different subtransaction of the current top transaction. If so,
* there is no need to change state or issue a WAL record. We already
* handled the case where this is true for xmax being a MultiXactId,
* so now check for cases where it is a plain TransactionId.
* We might already hold the desired lock (or stronger), possibly under a
* different subtransaction of the current top transaction. If so, there
* is no need to change state or issue a WAL record. We already handled
* the case where this is true for xmax being a MultiXactId, so now check
* for cases where it is a plain TransactionId.
*
* Note in particular that this covers the case where we already hold
* exclusive lock on the tuple and the caller only wants shared lock.
* It would certainly not do to give up the exclusive lock.
* exclusive lock on the tuple and the caller only wants shared lock. It
* would certainly not do to give up the exclusive lock.
*/
xmax = HeapTupleHeaderGetXmax(tuple->t_data);
old_infomask = tuple->t_data->t_infomask;
@ -3179,8 +3184,8 @@ l3:
{
/*
* If the XMAX is a valid TransactionId, then we need to
* create a new MultiXactId that includes both the old
* locker and our own TransactionId.
* create a new MultiXactId that includes both the old locker
* and our own TransactionId.
*/
xid = MultiXactIdCreate(xmax, xid);
new_infomask |= HEAP_XMAX_IS_MULTI;
@ -3214,8 +3219,8 @@ l3:
/*
* Store transaction information of xact locking the tuple.
*
* Note: Cmax is meaningless in this context, so don't set it; this
* avoids possibly generating a useless combo CID.
* Note: Cmax is meaningless in this context, so don't set it; this avoids
* possibly generating a useless combo CID.
*/
tuple->t_data->t_infomask = new_infomask;
HeapTupleHeaderClearHotUpdated(tuple->t_data);
@ -3425,6 +3430,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
buf = InvalidBuffer;
}
HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);
/*
* Might as well fix the hint bits too; usually XMIN_COMMITTED will
* already be set here, but there's a small chance not.
@ -3437,9 +3443,9 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
/*
* When we release shared lock, it's possible for someone else to change
* xmax before we get the lock back, so repeat the check after acquiring
* exclusive lock. (We don't need this pushup for xmin, because only
* VACUUM could be interested in changing an existing tuple's xmin,
* and there's only one VACUUM allowed on a table at a time.)
* exclusive lock. (We don't need this pushup for xmin, because only
* VACUUM could be interested in changing an existing tuple's xmin, and
* there's only one VACUUM allowed on a table at a time.)
*/
recheck_xmax:
if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
@ -3454,13 +3460,14 @@ recheck_xmax:
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
buf = InvalidBuffer;
goto recheck_xmax; /* see comment above */
goto recheck_xmax; /* see comment above */
}
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
/*
* The tuple might be marked either XMAX_INVALID or
* XMAX_COMMITTED + LOCKED. Normalize to INVALID just to be
* sure no one gets confused.
* The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
* + LOCKED. Normalize to INVALID just to be sure no one gets
* confused.
*/
tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
tuple->t_infomask |= HEAP_XMAX_INVALID;
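
The recheck_xmax flow above is an instance of a general pattern: a condition observed under a shared lock must be rechecked after upgrading to exclusive, because the upgrade is not atomic. A sketch of the same pattern using POSIX rwlocks in place of Postgres buffer locks (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int  flag = 1;           /* stands in for the xmax state being cleaned */

static void
freeze_flag(void)
{
    int         have_excl = 0;

    pthread_rwlock_rdlock(&lock);
recheck:
    if (flag != 0)
    {
        if (!have_excl)
        {
            /* No atomic upgrade: release, re-acquire exclusive, then
             * recheck -- another thread may have acted in the window. */
            pthread_rwlock_unlock(&lock);
            pthread_rwlock_wrlock(&lock);
            have_excl = 1;
            goto recheck;       /* mirrors the hunk's goto-based retry */
        }
        flag = 0;               /* safe: rechecked under the exclusive lock */
    }
    pthread_rwlock_unlock(&lock);
}

int
main(void)
{
    freeze_flag();
    printf("flag=%d\n", flag);  /* 0 */
    return 0;
}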
@ -3506,8 +3513,9 @@ recheck_xvac:
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
buf = InvalidBuffer;
goto recheck_xvac; /* see comment above */
goto recheck_xvac; /* see comment above */
}
/*
* If a MOVED_OFF tuple is not dead, the xvac transaction must
* have failed; whereas a non-dead MOVED_IN tuple must mean the
@ -3517,9 +3525,10 @@ recheck_xvac:
HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
else
HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
/*
* Might as well fix the hint bits too; usually XMIN_COMMITTED will
* already be set here, but there's a small chance not.
* Might as well fix the hint bits too; usually XMIN_COMMITTED
* will already be set here, but there's a small chance not.
*/
Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
tuple->t_infomask |= HEAP_XMIN_COMMITTED;
@ -3632,8 +3641,8 @@ log_heap_clean(Relation reln, Buffer buffer,
/*
* The OffsetNumber arrays are not actually in the buffer, but we pretend
* that they are. When XLogInsert stores the whole buffer, the offset
* arrays need not be stored too. Note that even if all three arrays
* are empty, we want to expose the buffer as a candidate for whole-page
* arrays need not be stored too. Note that even if all three arrays are
* empty, we want to expose the buffer as a candidate for whole-page
* storage, since this record type implies a defragmentation operation
* even if no item pointers changed state.
*/
@ -3686,7 +3695,7 @@ log_heap_clean(Relation reln, Buffer buffer,
}
/*
* Perform XLogInsert for a heap-freeze operation. Caller must already
* Perform XLogInsert for a heap-freeze operation. Caller must already
* have modified the buffer and marked it dirty.
*/
XLogRecPtr
@ -3711,9 +3720,9 @@ log_heap_freeze(Relation reln, Buffer buffer,
rdata[0].next = &(rdata[1]);
/*
* The tuple-offsets array is not actually in the buffer, but pretend
* that it is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
* The tuple-offsets array is not actually in the buffer, but pretend that
* it is. When XLogInsert stores the whole buffer, the offsets array need
* not be stored too.
*/
if (offcnt > 0)
{
@ -3853,7 +3862,7 @@ log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
* for writing the page to disk after calling this routine.
*
* Note: all current callers build pages in private memory and write them
* directly to smgr, rather than using bufmgr. Therefore there is no need
* directly to smgr, rather than using bufmgr. Therefore there is no need
* to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
* the critical section.
*
@ -3905,9 +3914,9 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
Page page;
OffsetNumber *offnum;
OffsetNumber *end;
int nredirected;
int ndead;
int i;
int nredirected;
int ndead;
int i;
if (record->xl_info & XLR_BKP_BLOCK_1)
return;
@ -3934,12 +3943,12 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
{
OffsetNumber fromoff = *offnum++;
OffsetNumber tooff = *offnum++;
ItemId fromlp = PageGetItemId(page, fromoff);
ItemId fromlp = PageGetItemId(page, fromoff);
if (clean_move)
{
/* Physically move the "to" item to the "from" slot */
ItemId tolp = PageGetItemId(page, tooff);
ItemId tolp = PageGetItemId(page, tooff);
HeapTupleHeader htup;
*fromlp = *tolp;
@ -3962,7 +3971,7 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
for (i = 0; i < ndead; i++)
{
OffsetNumber off = *offnum++;
ItemId lp = PageGetItemId(page, off);
ItemId lp = PageGetItemId(page, off);
ItemIdSetDead(lp);
}
@ -3971,14 +3980,14 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
while (offnum < end)
{
OffsetNumber off = *offnum++;
ItemId lp = PageGetItemId(page, off);
ItemId lp = PageGetItemId(page, off);
ItemIdSetUnused(lp);
}
/*
* Finally, repair any fragmentation, and update the page's hint bit
* about whether it has free pointers.
* Finally, repair any fragmentation, and update the page's hint bit about
* whether it has free pointers.
*/
PageRepairFragmentation(page);
@ -4617,7 +4626,7 @@ heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
xl_heap_update *xlrec = (xl_heap_update *) rec;
if (xl_info & XLOG_HEAP_INIT_PAGE) /* can this case happen? */
if (xl_info & XLOG_HEAP_INIT_PAGE) /* can this case happen? */
appendStringInfo(buf, "hot_update(init): ");
else
appendStringInfo(buf, "hot_update: ");
@ -4724,7 +4733,7 @@ heap_sync(Relation rel)
/* toast heap, if any */
if (OidIsValid(rel->rd_rel->reltoastrelid))
{
Relation toastrel;
Relation toastrel;
toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
FlushRelationBuffers(toastrel);


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.3 2007/10/24 13:05:57 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.4 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -22,21 +22,21 @@
/* Local functions */
static int heap_prune_chain(Relation relation, Buffer buffer,
OffsetNumber rootoffnum,
TransactionId OldestXmin,
OffsetNumber *redirected, int *nredirected,
OffsetNumber *nowdead, int *ndead,
OffsetNumber *nowunused, int *nunused,
bool redirect_move);
static int heap_prune_chain(Relation relation, Buffer buffer,
OffsetNumber rootoffnum,
TransactionId OldestXmin,
OffsetNumber *redirected, int *nredirected,
OffsetNumber *nowdead, int *ndead,
OffsetNumber *nowunused, int *nunused,
bool redirect_move);
static void heap_prune_record_redirect(OffsetNumber *redirected,
int *nredirected,
OffsetNumber offnum,
OffsetNumber rdoffnum);
int *nredirected,
OffsetNumber offnum,
OffsetNumber rdoffnum);
static void heap_prune_record_dead(OffsetNumber *nowdead, int *ndead,
OffsetNumber offnum);
OffsetNumber offnum);
static void heap_prune_record_unused(OffsetNumber *nowunused, int *nunused,
OffsetNumber offnum);
OffsetNumber offnum);
/*
@ -70,16 +70,16 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
return;
/*
* We prune when a previous UPDATE failed to find enough space on the
* page for a new tuple version, or when free space falls below the
* relation's fill-factor target (but not less than 10%).
* We prune when a previous UPDATE failed to find enough space on the page
* for a new tuple version, or when free space falls below the relation's
* fill-factor target (but not less than 10%).
*
* Checking free space here is questionable since we aren't holding
* any lock on the buffer; in the worst case we could get a bogus
* answer. It's unlikely to be *seriously* wrong, though, since
* reading either pd_lower or pd_upper is probably atomic. Avoiding
* taking a lock seems better than sometimes getting a wrong answer
* in what is after all just a heuristic estimate.
* Checking free space here is questionable since we aren't holding any
* lock on the buffer; in the worst case we could get a bogus answer.
* It's unlikely to be *seriously* wrong, though, since reading either
* pd_lower or pd_upper is probably atomic. Avoiding taking a lock seems
* better than sometimes getting a wrong answer in what is after all just
* a heuristic estimate.
*/
minfree = RelationGetTargetPageFreeSpace(relation,
HEAP_DEFAULT_FILLFACTOR);
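
The heuristic above prunes when free space falls below the fillfactor target, floored at 10% of the page. A sketch with assumed constants (target_free_space is a hypothetical helper, not the Postgres API):

#include <stdbool.h>
#include <stdio.h>

#define BLCKSZ 8192             /* standard Postgres page size */
#define Max(a,b) ((a) > (b) ? (a) : (b))

/* Hypothetical: free space the fillfactor setting wants kept on a page. */
static int
target_free_space(int fillfactor)
{
    return BLCKSZ * (100 - fillfactor) / 100;
}

static bool
should_prune(int page_free, int fillfactor)
{
    /* Below the fillfactor target, but never require less than 10%. */
    int         minfree = Max(target_free_space(fillfactor), BLCKSZ / 10);

    return page_free < minfree;
}

int
main(void)
{
    printf("%d\n", should_prune(4000, 70));   /* 0: plenty of room */
    printf("%d\n", should_prune(500, 70));    /* 1: below the 2457-byte target */
    printf("%d\n", should_prune(500, 100));   /* 1: the 10% floor still applies */
    return 0;
}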
@ -93,9 +93,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
/*
* Now that we have buffer lock, get accurate information about the
* page's free space, and recheck the heuristic about whether to prune.
* (We needn't recheck PageIsPrunable, since no one else could have
* pruned while we hold pin.)
* page's free space, and recheck the heuristic about whether to
* prune. (We needn't recheck PageIsPrunable, since no one else could
* have pruned while we hold pin.)
*/
if (PageIsFull(dp) || PageGetHeapFreeSpace((Page) dp) < minfree)
{
@ -119,7 +119,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer, TransactionId OldestXmin)
*
* If redirect_move is set, we remove redirecting line pointers by
* updating the root line pointer to point directly to the first non-dead
* tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple in the chain. NOTE: eliminating the redirect changes the first
* tuple's effective CTID, and is therefore unsafe except within VACUUM FULL.
* The only reason we support this capability at all is that by using it,
* VACUUM FULL need not cope with LP_REDIRECT items at all; which seems a
@ -136,18 +136,18 @@ int
heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
bool redirect_move, bool report_stats)
{
int ndeleted = 0;
Page page = BufferGetPage(buffer);
OffsetNumber offnum,
maxoff;
OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
OffsetNumber nowdead[MaxHeapTuplesPerPage];
OffsetNumber nowunused[MaxHeapTuplesPerPage];
int nredirected = 0;
int ndead = 0;
int nunused = 0;
bool page_was_full = false;
TransactionId save_prune_xid;
int ndeleted = 0;
Page page = BufferGetPage(buffer);
OffsetNumber offnum,
maxoff;
OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
OffsetNumber nowdead[MaxHeapTuplesPerPage];
OffsetNumber nowunused[MaxHeapTuplesPerPage];
int nredirected = 0;
int ndead = 0;
int nunused = 0;
bool page_was_full = false;
TransactionId save_prune_xid;
START_CRIT_SECTION();
@ -159,7 +159,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
save_prune_xid = ((PageHeader) page)->pd_prune_xid;
PageClearPrunable(page);
/*
/*
* Also clear the "page is full" flag if it is set, since there's no point
* in repeating the prune/defrag process until something else happens to
* the page.
@ -176,7 +176,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
ItemId itemid = PageGetItemId(page, offnum);
ItemId itemid = PageGetItemId(page, offnum);
/* Nothing to do if slot is empty or already dead */
if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
@ -233,9 +233,9 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
END_CRIT_SECTION();
/*
* If requested, report the number of tuples reclaimed to pgstats.
* This is ndeleted minus ndead, because we don't want to count a now-DEAD
* root item as a deletion for this purpose.
* If requested, report the number of tuples reclaimed to pgstats. This is
* ndeleted minus ndead, because we don't want to count a now-DEAD root
* item as a deletion for this purpose.
*/
if (report_stats && ndeleted > ndead)
pgstat_update_heap_dead_tuples(relation, ndeleted - ndead);
@ -243,19 +243,17 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
/*
* XXX Should we update the FSM information of this page ?
*
* There are two schools of thought here. We may not want to update
* FSM information so that the page is not used for unrelated
* UPDATEs/INSERTs and any free space in this page will remain
* available for further UPDATEs in *this* page, thus improving
* chances for doing HOT updates.
* There are two schools of thought here. We may not want to update FSM
* information so that the page is not used for unrelated UPDATEs/INSERTs
* and any free space in this page will remain available for further
* UPDATEs in *this* page, thus improving chances for doing HOT updates.
*
* But for a large table and where a page does not receive further
* UPDATEs for a long time, we might waste this space by not
* updating the FSM information. The relation may get extended and
* fragmented further.
* But for a large table and where a page does not receive further UPDATEs
* for a long time, we might waste this space by not updating the FSM
* information. The relation may get extended and fragmented further.
*
* One possibility is to leave "fillfactor" worth of space in this
* page and update FSM with the remaining space.
* One possibility is to leave "fillfactor" worth of space in this page
* and update FSM with the remaining space.
*
* In any case, the current FSM implementation doesn't accept
* one-page-at-a-time updates, so this is all academic for now.
@ -298,17 +296,17 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
OffsetNumber *nowunused, int *nunused,
bool redirect_move)
{
int ndeleted = 0;
Page dp = (Page) BufferGetPage(buffer);
TransactionId priorXmax = InvalidTransactionId;
ItemId rootlp;
HeapTupleHeader htup;
OffsetNumber latestdead = InvalidOffsetNumber,
maxoff = PageGetMaxOffsetNumber(dp),
offnum;
OffsetNumber chainitems[MaxHeapTuplesPerPage];
int nchain = 0,
i;
int ndeleted = 0;
Page dp = (Page) BufferGetPage(buffer);
TransactionId priorXmax = InvalidTransactionId;
ItemId rootlp;
HeapTupleHeader htup;
OffsetNumber latestdead = InvalidOffsetNumber,
maxoff = PageGetMaxOffsetNumber(dp),
offnum;
OffsetNumber chainitems[MaxHeapTuplesPerPage];
int nchain = 0,
i;
rootlp = PageGetItemId(dp, rootoffnum);
@ -321,14 +319,14 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
if (HeapTupleHeaderIsHeapOnly(htup))
{
/*
* If the tuple is DEAD and doesn't chain to anything else, mark it
* unused immediately. (If it does chain, we can only remove it as
* part of pruning its chain.)
* If the tuple is DEAD and doesn't chain to anything else, mark
* it unused immediately. (If it does chain, we can only remove
* it as part of pruning its chain.)
*
* We need this primarily to handle aborted HOT updates, that is,
* XMIN_INVALID heap-only tuples. Those might not be linked to
* by any chain, since the parent tuple might be re-updated before
* any pruning occurs. So we have to be able to reap them
* XMIN_INVALID heap-only tuples. Those might not be linked to by
* any chain, since the parent tuple might be re-updated before
* any pruning occurs. So we have to be able to reap them
* separately from chain-pruning.
*
* Note that we might first arrive at a dead heap-only tuple
@ -354,9 +352,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/* while not end of the chain */
for (;;)
{
ItemId lp;
bool tupdead,
recent_dead;
ItemId lp;
bool tupdead,
recent_dead;
/* Some sanity checks */
if (offnum < FirstOffsetNumber || offnum > maxoff)
@ -368,9 +366,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
break;
/*
* If we are looking at the redirected root line pointer,
* jump to the first normal tuple in the chain. If we find
* a redirect somewhere else, stop --- it must not be same chain.
* If we are looking at the redirected root line pointer, jump to the
* first normal tuple in the chain. If we find a redirect somewhere
* else, stop --- it must not be same chain.
*/
if (ItemIdIsRedirected(lp))
{
@ -382,9 +380,9 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
}
/*
* Likewise, a dead item pointer can't be part of the chain.
* (We already eliminated the case of dead root tuple outside
* this function.)
* Likewise, a dead item pointer can't be part of the chain. (We
* already eliminated the case of dead root tuple outside this
* function.)
*/
if (ItemIdIsDead(lp))
break;
@ -417,6 +415,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
case HEAPTUPLE_RECENTLY_DEAD:
recent_dead = true;
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
@ -425,6 +424,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
* This tuple may soon become DEAD. Update the hint field so
* that the page is reconsidered for pruning in future.
@ -434,11 +434,12 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
case HEAPTUPLE_LIVE:
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
* If we wanted to optimize for aborts, we might consider
* marking the page prunable when we see INSERT_IN_PROGRESS.
* But we don't. See related decisions about when to mark
* the page prunable in heapam.c.
* But we don't. See related decisions about when to mark the
* page prunable in heapam.c.
*/
break;
@ -486,12 +487,12 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
* Mark as unused each intermediate item that we are able to remove
* from the chain.
*
* When the previous item is the last dead tuple seen, we are at
* the right candidate for redirection.
* When the previous item is the last dead tuple seen, we are at the
* right candidate for redirection.
*/
for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
{
ItemId lp = PageGetItemId(dp, chainitems[i]);
ItemId lp = PageGetItemId(dp, chainitems[i]);
ItemIdSetUnused(lp);
heap_prune_record_unused(nowunused, nunused, chainitems[i]);
@ -499,17 +500,17 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
}
/*
* If the root entry had been a normal tuple, we are deleting it,
* so count it in the result. But changing a redirect (even to
* DEAD state) doesn't count.
* If the root entry had been a normal tuple, we are deleting it, so
* count it in the result. But changing a redirect (even to DEAD
* state) doesn't count.
*/
if (ItemIdIsNormal(rootlp))
ndeleted++;
/*
* If the DEAD tuple is at the end of the chain, the entire chain is
* dead and the root line pointer can be marked dead. Otherwise
* just redirect the root to the correct chain member.
* dead and the root line pointer can be marked dead. Otherwise just
* redirect the root to the correct chain member.
*/
if (i >= nchain)
{
@ -528,25 +529,25 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
{
/*
* We found a redirect item that doesn't point to a valid follow-on
* item. This can happen if the loop in heap_page_prune caused us
* to visit the dead successor of a redirect item before visiting
* the redirect item. We can clean up by setting the redirect item
* to DEAD state.
* item. This can happen if the loop in heap_page_prune caused us to
* visit the dead successor of a redirect item before visiting the
* redirect item. We can clean up by setting the redirect item to
* DEAD state.
*/
ItemIdSetDead(rootlp);
heap_prune_record_dead(nowdead, ndead, rootoffnum);
}
/*
* If requested, eliminate LP_REDIRECT items by moving tuples. Note that
* If requested, eliminate LP_REDIRECT items by moving tuples. Note that
* if the root item is LP_REDIRECT and doesn't point to a valid follow-on
* item, we already killed it above.
*/
if (redirect_move && ItemIdIsRedirected(rootlp))
{
OffsetNumber firstoffnum = ItemIdGetRedirect(rootlp);
ItemId firstlp = PageGetItemId(dp, firstoffnum);
HeapTupleData firsttup;
ItemId firstlp = PageGetItemId(dp, firstoffnum);
HeapTupleData firsttup;
Assert(ItemIdIsNormal(firstlp));
/* Set up firsttup to reference the tuple at its existing CTID */
@ -558,15 +559,15 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
firsttup.t_tableOid = RelationGetRelid(relation);
/*
* Mark the tuple for invalidation. Needed because we're changing
* its CTID.
* Mark the tuple for invalidation. Needed because we're changing its
* CTID.
*/
CacheInvalidateHeapTuple(relation, &firsttup);
/*
* Change heap-only status of the tuple because after the line
* pointer manipulation, it's no longer a heap-only tuple, but is
* directly pointed to by index entries.
* Change heap-only status of the tuple because after the line pointer
* manipulation, it's no longer a heap-only tuple, but is directly
* pointed to by index entries.
*/
Assert(HeapTupleIsHeapOnly(&firsttup));
HeapTupleClearHeapOnly(&firsttup);
@ -594,7 +595,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
/* Record newly-redirected item pointer */
static void
heap_prune_record_redirect(OffsetNumber *redirected, int *nredirected,
OffsetNumber offnum, OffsetNumber rdoffnum)
OffsetNumber offnum, OffsetNumber rdoffnum)
{
Assert(*nredirected < MaxHeapTuplesPerPage);
redirected[*nredirected * 2] = offnum;
@ -641,17 +642,18 @@ heap_prune_record_unused(OffsetNumber *nowunused, int *nunused,
void
heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
{
OffsetNumber offnum, maxoff;
OffsetNumber offnum,
maxoff;
MemSet(root_offsets, 0, MaxHeapTuplesPerPage * sizeof(OffsetNumber));
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum++)
{
ItemId lp = PageGetItemId(page, offnum);
HeapTupleHeader htup;
OffsetNumber nextoffnum;
TransactionId priorXmax;
ItemId lp = PageGetItemId(page, offnum);
HeapTupleHeader htup;
OffsetNumber nextoffnum;
TransactionId priorXmax;
/* skip unused and dead items */
if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))


@ -10,7 +10,7 @@
*
* The caller is responsible for creating the new heap, all catalog
* changes, supplying the tuples to be written to the new heap, and
* rebuilding indexes. The caller must hold AccessExclusiveLock on the
* rebuilding indexes. The caller must hold AccessExclusiveLock on the
* target table, because we assume no one else is writing into it.
*
* To use the facility:
@ -18,13 +18,13 @@
* begin_heap_rewrite
* while (fetch next tuple)
* {
* if (tuple is dead)
* rewrite_heap_dead_tuple
* else
* {
* // do any transformations here if required
* rewrite_heap_tuple
* }
* if (tuple is dead)
* rewrite_heap_dead_tuple
* else
* {
* // do any transformations here if required
* rewrite_heap_tuple
* }
* }
* end_heap_rewrite
*
@ -43,7 +43,7 @@
* to substitute the correct ctid instead.
*
* For each ctid reference from A -> B, we might encounter either A first
* or B first. (Note that a tuple in the middle of a chain is both A and B
* or B first. (Note that a tuple in the middle of a chain is both A and B
* of different pairs.)
*
* If we encounter A first, we'll store the tuple in the unresolved_tups
@ -58,11 +58,11 @@
* and can write A immediately with the correct ctid.
*
* Entries in the hash tables can be removed as soon as the later tuple
* is encountered. That helps to keep the memory usage down. At the end,
* is encountered. That helps to keep the memory usage down. At the end,
* both tables are usually empty; we should have encountered both A and B
* of each pair. However, it's possible for A to be RECENTLY_DEAD and B
* entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
* for deadness using OldestXmin is not exact. In such a case we might
* for deadness using OldestXmin is not exact. In such a case we might
* encounter B first, and skip it, and find A later. Then A would be added
* to unresolved_tups, and stay there until end of the rewrite. Since
* this case is very unusual, we don't worry about the memory usage.
@ -78,7 +78,7 @@
* of CLUSTERing on an unchanging key column, we'll see all the versions
* of a given tuple together anyway, and so the peak memory usage is only
* proportional to the number of RECENTLY_DEAD versions of a single row, not
* in the whole table. Note that if we do fail halfway through a CLUSTER,
* in the whole table. Note that if we do fail halfway through a CLUSTER,
* the old table is still valid, so failure is not catastrophic.
*
* We can't use the normal heap_insert function to insert into the new
@ -96,7 +96,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.7 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.8 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -116,20 +116,20 @@
*/
typedef struct RewriteStateData
{
Relation rs_new_rel; /* destination heap */
Page rs_buffer; /* page currently being built */
BlockNumber rs_blockno; /* block where page will go */
bool rs_buffer_valid; /* T if any tuples in buffer */
bool rs_use_wal; /* must we WAL-log inserts? */
TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
Relation rs_new_rel; /* destination heap */
Page rs_buffer; /* page currently being built */
BlockNumber rs_blockno; /* block where page will go */
bool rs_buffer_valid; /* T if any tuples in buffer */
bool rs_use_wal; /* must we WAL-log inserts? */
TransactionId rs_oldest_xmin; /* oldest xmin used by caller to
* determine tuple visibility */
TransactionId rs_freeze_xid; /* Xid that will be used as freeze
* cutoff point */
MemoryContext rs_cxt; /* for hash tables and entries and
* tuples in them */
HTAB *rs_unresolved_tups; /* unmatched A tuples */
HTAB *rs_old_new_tid_map; /* unmatched B tuples */
} RewriteStateData;
TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
* point */
MemoryContext rs_cxt; /* for hash tables and entries and tuples in
* them */
HTAB *rs_unresolved_tups; /* unmatched A tuples */
HTAB *rs_old_new_tid_map; /* unmatched B tuples */
} RewriteStateData;
/*
* The lookup keys for the hash tables are tuple TID and xmin (we must check
@ -139,27 +139,27 @@ typedef struct RewriteStateData
*/
typedef struct
{
TransactionId xmin; /* tuple xmin */
TransactionId xmin; /* tuple xmin */
ItemPointerData tid; /* tuple location in old heap */
} TidHashKey;
} TidHashKey;
/*
* Entry structures for the hash tables
*/
typedef struct
{
TidHashKey key; /* expected xmin/old location of B tuple */
TidHashKey key; /* expected xmin/old location of B tuple */
ItemPointerData old_tid; /* A's location in the old heap */
HeapTuple tuple; /* A's tuple contents */
} UnresolvedTupData;
HeapTuple tuple; /* A's tuple contents */
} UnresolvedTupData;
typedef UnresolvedTupData *UnresolvedTup;
typedef struct
{
TidHashKey key; /* actual xmin/old location of B tuple */
TidHashKey key; /* actual xmin/old location of B tuple */
ItemPointerData new_tid; /* where we put it in the new heap */
} OldToNewMappingData;
} OldToNewMappingData;
typedef OldToNewMappingData *OldToNewMapping;
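
These two entry types implement the A/B pairing described in this file's header: an A tuple waits in rs_unresolved_tups until its successor B is written, while a B tuple seen first leaves its new TID in rs_old_new_tid_map. A compact sketch of the second half of that protocol, using a fixed linear array as a stand-in for the hash table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint32_t xmin; uint32_t tid; } TidKey;   /* simplified key */

/* Stand-in for rs_old_new_tid_map: B tuples already written. */
static struct { TidKey key; uint32_t new_tid; } b_seen[16];
static int  n_b;

/* When we write a B tuple, remember where it went in the new heap. */
static void
remember_b(TidKey key, uint32_t new_tid)
{
    b_seen[n_b].key = key;
    b_seen[n_b].new_tid = new_tid;
    n_b++;
}

/* When we later meet the A tuple pointing at that key, resolve its ctid. */
static bool
resolve_a(TidKey want, uint32_t *new_ctid)
{
    for (int i = 0; i < n_b; i++)
        if (memcmp(&b_seen[i].key, &want, sizeof(want)) == 0)
        {
            *new_ctid = b_seen[i].new_tid;
            return true;
        }
    return false;       /* B not seen yet: A would wait in unresolved_tups */
}

int
main(void)
{
    uint32_t    ctid;

    remember_b((TidKey) {200, 7}, 42);     /* B at old tid 7, new tid 42 */
    if (resolve_a((TidKey) {200, 7}, &ctid))
        printf("A's ctid -> new tid %u\n", ctid);   /* 42 */
    return 0;
}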
@ -189,8 +189,8 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
HASHCTL hash_ctl;
/*
* To ease cleanup, make a separate context that will contain
* the RewriteState struct itself plus all subsidiary data.
* To ease cleanup, make a separate context that will contain the
* RewriteState struct itself plus all subsidiary data.
*/
rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
"Table rewrite",
@ -221,7 +221,7 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
state->rs_unresolved_tups =
hash_create("Rewrite / Unresolved ctids",
128, /* arbitrary initial size */
128, /* arbitrary initial size */
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
@ -229,7 +229,7 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
state->rs_old_new_tid_map =
hash_create("Rewrite / Old to new tid map",
128, /* arbitrary initial size */
128, /* arbitrary initial size */
&hash_ctl,
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
@ -250,8 +250,8 @@ end_heap_rewrite(RewriteState state)
UnresolvedTup unresolved;
/*
* Write any remaining tuples in the UnresolvedTups table. If we have
* any left, they should in fact be dead, but let's err on the safe side.
* Write any remaining tuples in the UnresolvedTups table. If we have any
* left, they should in fact be dead, but let's err on the safe side.
*
* XXX this really is a waste of code no?
*/
@ -276,15 +276,15 @@ end_heap_rewrite(RewriteState state)
}
/*
* If the rel isn't temp, must fsync before commit. We use heap_sync
* to ensure that the toast table gets fsync'd too.
* If the rel isn't temp, must fsync before commit. We use heap_sync to
* ensure that the toast table gets fsync'd too.
*
* It's obvious that we must do this when not WAL-logging. It's less
* obvious that we have to do it even if we did WAL-log the pages.
* The reason is the same as in tablecmds.c's copy_relation_data():
* we're writing data that's not in shared buffers, and so a CHECKPOINT
* occurring during the rewriteheap operation won't have fsync'd data
* we wrote before the checkpoint.
* obvious that we have to do it even if we did WAL-log the pages. The
* reason is the same as in tablecmds.c's copy_relation_data(): we're
* writing data that's not in shared buffers, and so a CHECKPOINT
* occurring during the rewriteheap operation won't have fsync'd data we
* wrote before the checkpoint.
*/
if (!state->rs_new_rel->rd_istemp)
heap_sync(state->rs_new_rel);
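
The comment above explains why the rewritten heap must be fsync'd before commit even when WAL-logged: these writes bypass shared buffers, so a checkpoint cannot be relied on to flush them. A minimal POSIX sketch of that write-then-fsync discipline (file name and contents are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
    const char  buf[] = "rewritten heap page image (placeholder)";
    int         fd = open("newheap.tmp", O_WRONLY | O_CREAT | O_TRUNC, 0600);

    if (fd < 0 || write(fd, buf, sizeof(buf)) != (ssize_t) sizeof(buf))
    {
        perror("write");
        exit(1);
    }

    /*
     * Data written with plain write(2) sits in the kernel page cache; a
     * crash after "commit" but before the OS flushes it would lose it.
     * Hence: fsync before declaring the rewrite durable.
     */
    if (fsync(fd) != 0)
    {
        perror("fsync");
        exit(1);
    }
    close(fd);
    puts("durable; safe to commit");
    return 0;
}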
@ -310,17 +310,17 @@ rewrite_heap_tuple(RewriteState state,
{
MemoryContext old_cxt;
ItemPointerData old_tid;
TidHashKey hashkey;
bool found;
bool free_new;
TidHashKey hashkey;
bool found;
bool free_new;
old_cxt = MemoryContextSwitchTo(state->rs_cxt);
/*
* Copy the original tuple's visibility information into new_tuple.
*
* XXX we might later need to copy some t_infomask2 bits, too?
* Right now, we intentionally clear the HOT status bits.
* XXX we might later need to copy some t_infomask2 bits, too? Right now,
* we intentionally clear the HOT status bits.
*/
memcpy(&new_tuple->t_data->t_choice.t_heap,
&old_tuple->t_data->t_choice.t_heap,
@ -335,16 +335,16 @@ rewrite_heap_tuple(RewriteState state,
* While we have our hands on the tuple, we may as well freeze any
* very-old xmin or xmax, so that future VACUUM effort can be saved.
*
* Note we abuse heap_freeze_tuple() a bit here, since it's expecting
* to be given a pointer to a tuple in a disk buffer. It happens
* though that we can get the right things to happen by passing
* InvalidBuffer for the buffer.
* Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
* be given a pointer to a tuple in a disk buffer. It happens though that
* we can get the right things to happen by passing InvalidBuffer for the
* buffer.
*/
heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);
/*
* Invalid ctid means that ctid should point to the tuple itself.
* We'll override it later if the tuple is part of an update chain.
* Invalid ctid means that ctid should point to the tuple itself. We'll
* override it later if the tuple is part of an update chain.
*/
ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);
@ -369,9 +369,9 @@ rewrite_heap_tuple(RewriteState state,
if (mapping != NULL)
{
/*
* We've already copied the tuple that t_ctid points to, so we
* can set the ctid of this tuple to point to the new location,
* and insert it right away.
* We've already copied the tuple that t_ctid points to, so we can
* set the ctid of this tuple to point to the new location, and
* insert it right away.
*/
new_tuple->t_data->t_ctid = mapping->new_tid;
@ -405,10 +405,10 @@ rewrite_heap_tuple(RewriteState state,
}
/*
* Now we will write the tuple, and then check to see if it is the
* B tuple in any new or known pair. When we resolve a known pair,
* we will be able to write that pair's A tuple, and then we have to
* check if it resolves some other pair. Hence, we need a loop here.
* Now we will write the tuple, and then check to see if it is the B tuple
* in any new or known pair. When we resolve a known pair, we will be
* able to write that pair's A tuple, and then we have to check if it
* resolves some other pair. Hence, we need a loop here.
*/
old_tid = old_tuple->t_self;
free_new = false;
@ -422,13 +422,12 @@ rewrite_heap_tuple(RewriteState state,
new_tid = new_tuple->t_self;
/*
* If the tuple is the updated version of a row, and the prior
* version wouldn't be DEAD yet, then we need to either resolve
* the prior version (if it's waiting in rs_unresolved_tups),
* or make an entry in rs_old_new_tid_map (so we can resolve it
* when we do see it). The previous tuple's xmax would equal this
* one's xmin, so it's RECENTLY_DEAD if and only if the xmin is
* not before OldestXmin.
* If the tuple is the updated version of a row, and the prior version
* wouldn't be DEAD yet, then we need to either resolve the prior
* version (if it's waiting in rs_unresolved_tups), or make an entry
* in rs_old_new_tid_map (so we can resolve it when we do see it).
* The previous tuple's xmax would equal this one's xmin, so it's
* RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
*/
if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
@ -449,9 +448,9 @@ rewrite_heap_tuple(RewriteState state,
if (unresolved != NULL)
{
/*
* We have seen and memorized the previous tuple already.
* Now that we know where we inserted the tuple its t_ctid
* points to, fix its t_ctid and insert it to the new heap.
* We have seen and memorized the previous tuple already. Now
* that we know where we inserted the tuple its t_ctid points
* to, fix its t_ctid and insert it to the new heap.
*/
if (free_new)
heap_freetuple(new_tuple);
@ -461,8 +460,8 @@ rewrite_heap_tuple(RewriteState state,
new_tuple->t_data->t_ctid = new_tid;
/*
* We don't need the hash entry anymore, but don't free
* its tuple just yet.
* We don't need the hash entry anymore, but don't free its
* tuple just yet.
*/
hash_search(state->rs_unresolved_tups, &hashkey,
HASH_REMOVE, &found);
@ -474,8 +473,8 @@ rewrite_heap_tuple(RewriteState state,
else
{
/*
* Remember the new tid of this tuple. We'll use it to set
* the ctid when we find the previous tuple in the chain.
* Remember the new tid of this tuple. We'll use it to set the
* ctid when we find the previous tuple in the chain.
*/
OldToNewMapping mapping;
@ -506,22 +505,22 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
/*
* If we have already seen an earlier tuple in the update chain that
* points to this tuple, let's forget about that earlier tuple. It's
* in fact dead as well, our simple xmax < OldestXmin test in
* HeapTupleSatisfiesVacuum just wasn't enough to detect it. It
* happens when xmin of a tuple is greater than xmax, which sounds
* points to this tuple, let's forget about that earlier tuple. It's in
* fact dead as well, our simple xmax < OldestXmin test in
* HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
* when xmin of a tuple is greater than xmax, which sounds
* counter-intuitive but is perfectly valid.
*
* We don't bother to try to detect the situation the other way
* round, when we encounter the dead tuple first and then the
* recently dead one that points to it. If that happens, we'll
* have some unmatched entries in the UnresolvedTups hash table
* at the end. That can happen anyway, because a vacuum might
* have removed the dead tuple in the chain before us.
* We don't bother to try to detect the situation the other way round,
* when we encounter the dead tuple first and then the recently dead one
* that points to it. If that happens, we'll have some unmatched entries
* in the UnresolvedTups hash table at the end. That can happen anyway,
* because a vacuum might have removed the dead tuple in the chain before
* us.
*/
UnresolvedTup unresolved;
TidHashKey hashkey;
bool found;
TidHashKey hashkey;
bool found;
memset(&hashkey, 0, sizeof(hashkey));
hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
@ -541,7 +540,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
}
/*
* Insert a tuple to the new relation. This has to track heap_insert
* Insert a tuple to the new relation. This has to track heap_insert
* and its subsidiary functions!
*
* t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
@ -551,11 +550,12 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
Page page = state->rs_buffer;
Size pageFreeSpace, saveFreeSpace;
Size len;
OffsetNumber newoff;
HeapTuple heaptup;
Page page = state->rs_buffer;
Size pageFreeSpace,
saveFreeSpace;
Size len;
OffsetNumber newoff;
HeapTuple heaptup;
/*
* If the new tuple is too big for storage or contains already toasted
@ -610,7 +610,8 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
/*
* Now write the page. We say isTemp = true even if it's not a
* temp table, because there's no need for smgr to schedule an
* fsync for this write; we'll do it ourselves in end_heap_rewrite.
* fsync for this write; we'll do it ourselves in
* end_heap_rewrite.
*/
RelationOpenSmgr(state->rs_new_rel);
smgrextend(state->rs_new_rel->rd_smgr, state->rs_blockno,
@ -638,12 +639,12 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);
/*
* Insert the correct position into CTID of the stored tuple, too,
* if the caller didn't supply a valid CTID.
* Insert the correct position into CTID of the stored tuple, too, if the
* caller didn't supply a valid CTID.
*/
if(!ItemPointerIsValid(&tup->t_data->t_ctid))
if (!ItemPointerIsValid(&tup->t_data->t_ctid))
{
ItemId newitemid;
ItemId newitemid;
HeapTupleHeader onpage_tup;
newitemid = PageGetItemId(page, newoff);
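The reflowed comments above describe rewrite_heap_tuple()'s pair-resolution loop. Below is a minimal stand-alone sketch of just that loop in plain C; ToyTuple, waiters[] and new_tid_of[] are invented stand-ins (the backend uses dynahash tables keyed by xmin and TID): writing one tuple may unblock a memorized predecessor, whose write may unblock another.

/*
 * Toy model of rewrite_heap_tuple()'s pair-resolution loop.  All names
 * below are illustrative, not backend APIs.
 */
#include <stdio.h>

#define MAXTID 16

typedef struct ToyTuple
{
	int			old_tid;	/* location in the old heap */
	int			ctid;		/* old location of the next version, or -1 */
} ToyTuple;

static ToyTuple *waiters[MAXTID];	/* parked, waiting on that old tid */
static int	new_tid_of[MAXTID]; /* -1 until the tuple has been written */
static int	next_new_tid = 0;

static void
toy_rewrite_tuple(ToyTuple *tup)
{
	if (tup->ctid >= 0 && new_tid_of[tup->ctid] < 0)
	{
		/* successor not placed yet: memorize ("unresolved") and wait */
		waiters[tup->ctid] = tup;
		return;
	}

	/*
	 * Write the tuple, then keep resolving: anything waiting on this
	 * tuple's old location can be written now, and that write may
	 * unblock yet another waiter -- hence the loop.
	 */
	for (;;)
	{
		ToyTuple   *w;

		new_tid_of[tup->old_tid] = next_new_tid;
		printf("old tid %d -> new tid %d\n", tup->old_tid, next_new_tid);
		next_new_tid++;

		w = waiters[tup->old_tid];
		if (w == NULL)
			break;				/* nothing was waiting on us */
		waiters[tup->old_tid] = NULL;
		tup = w;				/* its ctid is known now; write it too */
	}
}

int
main(void)
{
	ToyTuple	c1 = {0, 1};	/* oldest version, updated to tid 1 */
	ToyTuple	c2 = {1, 2};	/* middle version, updated to tid 2 */
	ToyTuple	c3 = {2, -1};	/* newest version, end of chain */
	int			i;

	for (i = 0; i < MAXTID; i++)
		new_tid_of[i] = -1;

	toy_rewrite_tuple(&c1);		/* parks, waiting on old tid 1 */
	toy_rewrite_tuple(&c2);		/* parks, waiting on old tid 2 */
	toy_rewrite_tuple(&c3);		/* writes c3, then c2, then c1 */
	return 0;
}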
@ -4,7 +4,7 @@
* heap scan synchronization support
*
* When multiple backends run a sequential scan on the same table, we try
* to keep them synchronized to reduce the overall I/O needed. The goal is
* to keep them synchronized to reduce the overall I/O needed. The goal is
* to read each page into shared buffer cache only once, and let all backends
* that take part in the shared scan process the page before it falls out of
* the cache.
@ -26,7 +26,7 @@
* don't want such queries to slow down others.
*
* There can realistically only be a few large sequential scans on different
* tables in progress at any time. Therefore we just keep the scan positions
* tables in progress at any time. Therefore we just keep the scan positions
* in a small LRU list which we scan every time we need to look up or update a
* scan position. The whole mechanism is only applied for tables exceeding
* a threshold size (but that is not the concern of this module).
@ -40,7 +40,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/syncscan.c,v 1.1 2007/06/08 18:23:52 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/syncscan.c,v 1.2 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -52,7 +52,7 @@
/* GUC variables */
#ifdef TRACE_SYNCSCAN
bool trace_syncscan = false;
bool trace_syncscan = false;
#endif
@ -89,21 +89,21 @@ typedef struct ss_scan_location_t
{
RelFileNode relfilenode; /* identity of a relation */
BlockNumber location; /* last-reported location in the relation */
} ss_scan_location_t;
} ss_scan_location_t;
typedef struct ss_lru_item_t
{
struct ss_lru_item_t *prev;
struct ss_lru_item_t *next;
ss_scan_location_t location;
} ss_lru_item_t;
struct ss_lru_item_t *prev;
struct ss_lru_item_t *next;
ss_scan_location_t location;
} ss_lru_item_t;
typedef struct ss_scan_locations_t
{
ss_lru_item_t *head;
ss_lru_item_t *tail;
ss_lru_item_t items[1]; /* SYNC_SCAN_NELEM items */
} ss_scan_locations_t;
ss_lru_item_t *head;
ss_lru_item_t *tail;
ss_lru_item_t items[1]; /* SYNC_SCAN_NELEM items */
} ss_scan_locations_t;
#define SizeOfScanLocations(N) offsetof(ss_scan_locations_t, items[N])
@ -112,7 +112,7 @@ static ss_scan_locations_t *scan_locations;
/* prototypes for internal functions */
static BlockNumber ss_search(RelFileNode relfilenode,
BlockNumber location, bool set);
BlockNumber location, bool set);
/*
@ -130,8 +130,8 @@ SyncScanShmemSize(void)
void
SyncScanShmemInit(void)
{
int i;
bool found;
int i;
bool found;
scan_locations = (ss_scan_locations_t *)
ShmemInitStruct("Sync Scan Locations List",
@ -186,20 +186,20 @@ SyncScanShmemInit(void)
static BlockNumber
ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
{
ss_lru_item_t *item;
ss_lru_item_t *item;
item = scan_locations->head;
for (;;)
{
bool match;
bool match;
match = RelFileNodeEquals(item->location.relfilenode, relfilenode);
if (match || item->next == NULL)
{
/*
* If we reached the end of list and no match was found,
* take over the last entry
* If we reached the end of list and no match was found, take over
* the last entry
*/
if (!match)
{
@ -242,7 +242,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
* relation, or 0 if no valid location is found.
*
* We expect the caller has just done RelationGetNumberOfBlocks(), and
* so that number is passed in rather than computing it again. The result
* so that number is passed in rather than computing it again. The result
* is guaranteed less than relnblocks (assuming that's > 0).
*/
BlockNumber
@ -257,8 +257,8 @@ ss_get_location(Relation rel, BlockNumber relnblocks)
/*
* If the location is not a valid block number for this scan, start at 0.
*
* This can happen if for instance a VACUUM truncated the table
* since the location was saved.
* This can happen if for instance a VACUUM truncated the table since the
* location was saved.
*/
if (startloc >= relnblocks)
startloc = 0;
@ -294,12 +294,12 @@ ss_report_location(Relation rel, BlockNumber location)
#endif
/*
* To reduce lock contention, only report scan progress every N pages.
* For the same reason, don't block if the lock isn't immediately
* available. Missing a few updates isn't critical, it just means that a
* new scan that wants to join the pack will start a little bit behind the
* head of the scan. Hopefully the pages are still in OS cache and the
* scan catches up quickly.
* To reduce lock contention, only report scan progress every N pages. For
* the same reason, don't block if the lock isn't immediately available.
* Missing a few updates isn't critical, it just means that a new scan
* that wants to join the pack will start a little bit behind the head of
* the scan. Hopefully the pages are still in OS cache and the scan
* catches up quickly.
*/
if ((location % SYNC_SCAN_REPORT_INTERVAL) == 0)
{
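The ss_lru_item_t / ss_scan_locations_t declarations above implement a tiny LRU list of scan positions. A self-contained sketch of the lookup-or-take-over logic in ss_search(), with an int standing in for RelFileNode and no locking (the real list lives in shared memory under a lock):

/*
 * Minimal stand-alone sketch of the syncscan LRU: a handful of
 * (relation, block) positions in a doubly-linked list; a hit moves the
 * entry to the head, a miss takes over the tail entry.
 */
#include <stdio.h>

#define NELEM 4

typedef struct item_t
{
	struct item_t *prev;
	struct item_t *next;
	int			rel;			/* stands in for RelFileNode */
	unsigned	location;		/* last-reported block number */
} item_t;

static item_t items[NELEM];
static item_t *head;
static item_t *tail;

static void
lru_init(void)
{
	int			i;

	for (i = 0; i < NELEM; i++)
	{
		items[i].rel = -1;
		items[i].location = 0;
		items[i].prev = (i > 0) ? &items[i - 1] : NULL;
		items[i].next = (i < NELEM - 1) ? &items[i + 1] : NULL;
	}
	head = &items[0];
	tail = &items[NELEM - 1];
}

/* look up rel's position; if set, store location; returns stored value */
static unsigned
lru_search(int rel, unsigned location, int set)
{
	item_t	   *item = head;

	for (;;)
	{
		int			match = (item->rel == rel);

		if (match || item->next == NULL)
		{
			/* reached the end with no match: take over the tail entry */
			if (!match)
			{
				item->rel = rel;
				item->location = location;
			}
			else if (set)
				item->location = location;

			/* move the entry to the front of the LRU list */
			if (item != head)
			{
				item->prev->next = item->next;
				if (item->next)
					item->next->prev = item->prev;
				else
					tail = item->prev;
				item->prev = NULL;
				item->next = head;
				head->prev = item;
				head = item;
			}
			return item->location;
		}
		item = item->next;
	}
}

int
main(void)
{
	lru_init();
	lru_search(42, 100, 1);		/* report rel 42 at block 100 */
	printf("rel 42 resumes at block %u\n", lru_search(42, 0, 0));
	return 0;
}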
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.78 2007/10/11 18:19:58 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.79 2007/11/15 21:14:32 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -72,9 +72,9 @@ do { \
static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value,
bool use_wal, bool use_fsm);
static struct varlena *toast_fetch_datum(struct varlena *attr);
static struct varlena *toast_fetch_datum_slice(struct varlena *attr,
bool use_wal, bool use_fsm);
static struct varlena *toast_fetch_datum(struct varlena * attr);
static struct varlena *toast_fetch_datum_slice(struct varlena * attr,
int32 sliceoffset, int32 length);
@ -90,9 +90,9 @@ static struct varlena *toast_fetch_datum_slice(struct varlena *attr,
* ----------
*/
struct varlena *
heap_tuple_fetch_attr(struct varlena *attr)
heap_tuple_fetch_attr(struct varlena * attr)
{
struct varlena *result;
struct varlena *result;
if (VARATT_IS_EXTERNAL(attr))
{
@ -121,7 +121,7 @@ heap_tuple_fetch_attr(struct varlena *attr)
* ----------
*/
struct varlena *
heap_tuple_untoast_attr(struct varlena *attr)
heap_tuple_untoast_attr(struct varlena * attr)
{
if (VARATT_IS_EXTERNAL(attr))
{
@ -156,8 +156,8 @@ heap_tuple_untoast_attr(struct varlena *attr)
/*
* This is a short-header varlena --- convert to 4-byte header format
*/
Size data_size = VARSIZE_SHORT(attr) - VARHDRSZ_SHORT;
Size new_size = data_size + VARHDRSZ;
Size data_size = VARSIZE_SHORT(attr) - VARHDRSZ_SHORT;
Size new_size = data_size + VARHDRSZ;
struct varlena *new_attr;
new_attr = (struct varlena *) palloc(new_size);
@ -178,12 +178,12 @@ heap_tuple_untoast_attr(struct varlena *attr)
* ----------
*/
struct varlena *
heap_tuple_untoast_attr_slice(struct varlena *attr,
heap_tuple_untoast_attr_slice(struct varlena * attr,
int32 sliceoffset, int32 slicelength)
{
struct varlena *preslice;
struct varlena *result;
char *attrdata;
char *attrdata;
int32 attrsize;
if (VARATT_IS_EXTERNAL(attr))
@ -205,7 +205,7 @@ heap_tuple_untoast_attr_slice(struct varlena *attr,
if (VARATT_IS_COMPRESSED(preslice))
{
PGLZ_Header *tmp = (PGLZ_Header *) preslice;
Size size = PGLZ_RAW_SIZE(tmp) + VARHDRSZ;
Size size = PGLZ_RAW_SIZE(tmp) + VARHDRSZ;
preslice = (struct varlena *) palloc(size);
SET_VARSIZE(preslice, size);
@ -300,7 +300,7 @@ toast_raw_datum_size(Datum value)
Size
toast_datum_size(Datum value)
{
struct varlena *attr = (struct varlena *) DatumGetPointer(value);
struct varlena *attr = (struct varlena *) DatumGetPointer(value);
Size result;
if (VARATT_IS_EXTERNAL(attr))
@ -469,8 +469,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
for (i = 0; i < numAttrs; i++)
{
struct varlena *old_value;
struct varlena *new_value;
struct varlena *old_value;
struct varlena *new_value;
if (oldtup != NULL)
{
@ -488,7 +488,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
VARATT_IS_EXTERNAL(old_value))
{
if (toast_isnull[i] || !VARATT_IS_EXTERNAL(new_value) ||
memcmp((char *) old_value, (char *) new_value,
memcmp((char *) old_value, (char *) new_value,
VARSIZE_EXTERNAL(old_value)) != 0)
{
/*
@ -543,7 +543,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
* We took care of UPDATE above, so any external value we find
* still in the tuple must be someone else's we cannot reuse.
* Fetch it back (without decompression, unless we are forcing
* PLAIN storage). If necessary, we'll push it out as a new
* PLAIN storage). If necessary, we'll push it out as a new
* external value below.
*/
if (VARATT_IS_EXTERNAL(new_value))
@ -656,7 +656,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup,
/*
* Second we look for attributes of attstorage 'x' or 'e' that are still
* inline. But skip this if there's no toast table to push them to.
* inline. But skip this if there's no toast table to push them to.
*/
while (heap_compute_data_size(tupleDesc,
toast_values, toast_isnull) > maxDataLen &&
@ -956,7 +956,7 @@ toast_flatten_tuple_attribute(Datum value,
has_nulls = true;
else if (att[i]->attlen == -1)
{
struct varlena *new_value;
struct varlena *new_value;
new_value = (struct varlena *) DatumGetPointer(toast_values[i]);
if (VARATT_IS_EXTERNAL(new_value) ||
@ -1046,7 +1046,8 @@ toast_compress_datum(Datum value)
Assert(!VARATT_IS_COMPRESSED(value));
/*
* No point in wasting a palloc cycle if value is too short for compression
* No point in wasting a palloc cycle if value is too short for
* compression
*/
if (valsize < PGLZ_strategy_default->min_input_size)
return PointerGetDatum(NULL);
@ -1110,8 +1111,8 @@ toast_save_datum(Relation rel, Datum value,
/*
* Get the data pointer and length, and compute va_rawsize and va_extsize.
*
* va_rawsize is the size of the equivalent fully uncompressed datum,
* so we have to adjust for short headers.
* va_rawsize is the size of the equivalent fully uncompressed datum, so
* we have to adjust for short headers.
*
* va_extsize is the actual size of the data payload in the toast records.
*/
@ -1119,7 +1120,7 @@ toast_save_datum(Relation rel, Datum value,
{
data_p = VARDATA_SHORT(value);
data_todo = VARSIZE_SHORT(value) - VARHDRSZ_SHORT;
toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
toast_pointer.va_rawsize = data_todo + VARHDRSZ; /* as if not short */
toast_pointer.va_extsize = data_todo;
}
else if (VARATT_IS_COMPRESSED(value))
@ -1283,7 +1284,7 @@ toast_delete_datum(Relation rel, Datum value)
* ----------
*/
static struct varlena *
toast_fetch_datum(struct varlena *attr)
toast_fetch_datum(struct varlena * attr)
{
Relation toastrel;
Relation toastidx;
@ -1299,7 +1300,7 @@ toast_fetch_datum(struct varlena *attr)
int32 numchunks;
Pointer chunk;
bool isnull;
char *chunkdata;
char *chunkdata;
int32 chunksize;
/* Must copy to access aligned fields */
@ -1365,7 +1366,7 @@ toast_fetch_datum(struct varlena *attr)
{
/* should never happen */
elog(ERROR, "found toasted toast chunk");
chunksize = 0; /* keep compiler quiet */
chunksize = 0; /* keep compiler quiet */
chunkdata = NULL;
}
@ -1384,12 +1385,12 @@ toast_fetch_datum(struct varlena *attr)
residx, numchunks,
toast_pointer.va_valueid);
}
else if (residx == numchunks-1)
else if (residx == numchunks - 1)
{
if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != ressize)
elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u",
chunksize,
(int) (ressize - residx*TOAST_MAX_CHUNK_SIZE),
(int) (ressize - residx * TOAST_MAX_CHUNK_SIZE),
residx,
toast_pointer.va_valueid);
}
@ -1397,7 +1398,7 @@ toast_fetch_datum(struct varlena *attr)
elog(ERROR, "unexpected chunk number %d for toast value %u (out of range %d..%d)",
residx,
toast_pointer.va_valueid,
0, numchunks-1);
0, numchunks - 1);
/*
* Copy the data into proper place in our result
@ -1435,7 +1436,7 @@ toast_fetch_datum(struct varlena *attr)
* ----------
*/
static struct varlena *
toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length)
{
Relation toastrel;
Relation toastidx;
@ -1457,7 +1458,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
int totalchunks;
Pointer chunk;
bool isnull;
char *chunkdata;
char *chunkdata;
int32 chunksize;
int32 chcpystrt;
int32 chcpyend;
@ -1574,7 +1575,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
{
/* should never happen */
elog(ERROR, "found toasted toast chunk");
chunksize = 0; /* keep compiler quiet */
chunksize = 0; /* keep compiler quiet */
chunkdata = NULL;
}
@ -1593,7 +1594,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
residx, totalchunks,
toast_pointer.va_valueid);
}
else if (residx == totalchunks-1)
else if (residx == totalchunks - 1)
{
if ((residx * TOAST_MAX_CHUNK_SIZE + chunksize) != attrsize)
elog(ERROR, "unexpected chunk size %d (expected %d) in final chunk %d for toast value %u when fetching slice",
@ -1606,7 +1607,7 @@ toast_fetch_datum_slice(struct varlena *attr, int32 sliceoffset, int32 length)
elog(ERROR, "unexpected chunk number %d for toast value %u (out of range %d..%d)",
residx,
toast_pointer.va_valueid,
0, totalchunks-1);
0, totalchunks - 1);
/*
* Copy the data into proper place in our result
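toast_fetch_datum() above reassembles a value from fixed-size chunks and cross-checks each chunk's number and size. A stand-alone sketch of just that bookkeeping, with a toy 8-byte chunk size and strings standing in for chunk data (fetch_datum() here is an invented name, not the backend routine):

/*
 * Sketch of toast_fetch_datum()'s chunk arithmetic: every chunk except
 * the last must be exactly full, and the last must land exactly on the
 * expected total size.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE 8			/* stands in for TOAST_MAX_CHUNK_SIZE */

static char *
fetch_datum(const char *chunks[], int nchunks_given, int ressize)
{
	int			numchunks = (ressize + CHUNK_SIZE - 1) / CHUNK_SIZE;
	char	   *result = malloc(ressize + 1);
	int			residx;

	if (nchunks_given != numchunks)
	{
		fprintf(stderr, "missing chunks: got %d, expected %d\n",
				nchunks_given, numchunks);
		exit(1);
	}

	for (residx = 0; residx < numchunks; residx++)
	{
		int			chunksize = (int) strlen(chunks[residx]);

		/* every chunk except the last must be exactly full */
		if (residx < numchunks - 1 && chunksize != CHUNK_SIZE)
		{
			fprintf(stderr, "unexpected chunk size %d in chunk %d\n",
					chunksize, residx);
			exit(1);
		}
		/* the last chunk must land exactly on the total size */
		if (residx == numchunks - 1 &&
			residx * CHUNK_SIZE + chunksize != ressize)
		{
			fprintf(stderr, "unexpected size %d in final chunk %d\n",
					chunksize, residx);
			exit(1);
		}
		/* copy the data into the proper place in our result */
		memcpy(result + residx * CHUNK_SIZE, chunks[residx], chunksize);
	}
	result[ressize] = '\0';
	return result;
}

int
main(void)
{
	const char *chunks[] = {"12345678", "abcdefgh", "XYZ"};
	char	   *value = fetch_datum(chunks, 3, 19);

	printf("%s\n", value);		/* prints 12345678abcdefghXYZ */
	free(value);
	return 0;
}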
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.99 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.100 2007/11/15 21:14:32 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@ -379,7 +379,7 @@ index_markpos(IndexScanDesc scan)
* returnable tuple in each HOT chain, and so restoring the prior state at the
* granularity of the index AM is sufficient. Since the only current user
* of mark/restore functionality is nodeMergejoin.c, this effectively means
* that merge-join plans only work for MVCC snapshots. This could be fixed
* that merge-join plans only work for MVCC snapshots. This could be fixed
* if necessary, but for now it seems unimportant.
* ----------------
*/
@ -413,7 +413,7 @@ HeapTuple
index_getnext(IndexScanDesc scan, ScanDirection direction)
{
HeapTuple heapTuple = &scan->xs_ctup;
ItemPointer tid = &heapTuple->t_self;
ItemPointer tid = &heapTuple->t_self;
FmgrInfo *procedure;
SCAN_CHECKS;
@ -429,14 +429,14 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
for (;;)
{
OffsetNumber offnum;
bool at_chain_start;
Page dp;
bool at_chain_start;
Page dp;
if (scan->xs_next_hot != InvalidOffsetNumber)
{
/*
* We are resuming scan of a HOT chain after having returned
* an earlier member. Must still hold pin on current heap page.
* We are resuming scan of a HOT chain after having returned an
* earlier member. Must still hold pin on current heap page.
*/
Assert(BufferIsValid(scan->xs_cbuf));
Assert(ItemPointerGetBlockNumber(tid) ==
@ -506,7 +506,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/* Scan through possible multiple members of HOT-chain */
for (;;)
{
ItemId lp;
ItemId lp;
ItemPointer ctid;
/* check for bogus TID */
@ -532,8 +532,8 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
/*
* We must initialize all of *heapTuple (ie, scan->xs_ctup)
* since it is returned to the executor on success.
* We must initialize all of *heapTuple (ie, scan->xs_ctup) since
* it is returned to the executor on success.
*/
heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
heapTuple->t_len = ItemIdGetLength(lp);
@ -544,20 +544,21 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* Shouldn't see a HEAP_ONLY tuple at chain start. (This test
* should be unnecessary, since the chain root can't be removed
* while we have pin on the index entry, but let's make it anyway.)
* while we have pin on the index entry, but let's make it
* anyway.)
*/
if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
break;
/*
* The xmin should match the previous xmax value, else chain is
* broken. (Note: this test is not optional because it protects
* us against the case where the prior chain member's xmax
* aborted since we looked at it.)
* broken. (Note: this test is not optional because it protects
* us against the case where the prior chain member's xmax aborted
* since we looked at it.)
*/
if (TransactionIdIsValid(scan->xs_prev_xmax) &&
!TransactionIdEquals(scan->xs_prev_xmax,
HeapTupleHeaderGetXmin(heapTuple->t_data)))
HeapTupleHeaderGetXmin(heapTuple->t_data)))
break;
/* If it's visible per the snapshot, we must return it */
@ -565,10 +566,10 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
scan->xs_cbuf))
{
/*
* If the snapshot is MVCC, we know that it could accept
* at most one member of the HOT chain, so we can skip
* examining any more members. Otherwise, check for
* continuation of the HOT-chain, and set state for next time.
* If the snapshot is MVCC, we know that it could accept at
* most one member of the HOT chain, so we can skip examining
* any more members. Otherwise, check for continuation of the
* HOT-chain, and set state for next time.
*/
if (IsMVCCSnapshot(scan->xs_snapshot))
scan->xs_next_hot = InvalidOffsetNumber;
@ -615,7 +616,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
else
break; /* end of chain */
} /* loop over a single HOT chain */
} /* loop over a single HOT chain */
LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);
@ -788,7 +789,7 @@ index_vacuum_cleanup(IndexVacuumInfo *info,
* particular indexed attribute are those with both types equal to
* the index opclass' opcintype (note that this is subtly different
* from the indexed attribute's own type: it may be a binary-compatible
* type instead). Only the default functions are stored in relcache
* type instead). Only the default functions are stored in relcache
* entries --- access methods can use the syscache to look up non-default
* functions.
*
@ -822,7 +823,7 @@ index_getprocid(Relation irel,
* index_getprocinfo
*
* This routine allows index AMs to keep fmgr lookup info for
* support procs in the relcache. As above, only the "default"
* support procs in the relcache. As above, only the "default"
* functions for any particular indexed attribute are cached.
*
* Note: the return value points into cached data that will be lost during
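index_getnext() above validates each HOT-chain step by requiring the next member's xmin to match the previous member's xmax, so an aborted update cannot splice unrelated tuples together. A toy sketch of that check, with plain arrays standing in for heap pages (ToyTuple and walk_hot_chain are invented names):

/*
 * Toy sketch of the HOT-chain sanity check in index_getnext(): follow
 * the chain and stop when a member's xmin does not match the previous
 * member's xmax (e.g., because that update aborted).
 */
#include <stdio.h>

typedef struct
{
	unsigned	xmin;			/* inserting transaction */
	unsigned	xmax;			/* updating transaction, 0 if none */
	int			next;			/* index of next chain member, -1 at end */
} ToyTuple;

static void
walk_hot_chain(ToyTuple *page, int root)
{
	unsigned	prev_xmax = 0;	/* stands in for scan->xs_prev_xmax */
	int			off = root;

	while (off >= 0)
	{
		ToyTuple   *tup = &page[off];

		/* xmin must match the previous xmax, else the chain is broken */
		if (prev_xmax != 0 && prev_xmax != tup->xmin)
		{
			printf("chain broken at member %d\n", off);
			return;
		}
		printf("member %d: xmin %u xmax %u\n", off, tup->xmin, tup->xmax);
		prev_xmax = tup->xmax;
		off = tup->next;
	}
}

int
main(void)
{
	/* member 2 was inserted by xact 900, not by member 1's updater 210 */
	ToyTuple	page[] = {
		{100, 200, 1},
		{200, 210, 2},
		{900, 0, -1},
	};

	walk_hot_chain(page, 0);
	return 0;
}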
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.160 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.161 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -32,7 +32,7 @@ typedef struct
OffsetNumber newitemoff; /* where the new item is to be inserted */
int leftspace; /* space available for items on left page */
int rightspace; /* space available for items on right page */
int olddataitemstotal; /* space taken by old items */
int olddataitemstotal; /* space taken by old items */
bool have_split; /* found a valid split? */
@ -222,7 +222,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
if (!ItemIdIsDead(curitemid))
{
ItemPointerData htid;
bool all_dead;
bool all_dead;
/*
* _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
@ -239,8 +239,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* We check the whole HOT-chain to see if there is any tuple
* that satisfies SnapshotDirty. This is necessary because
* we have just a single index entry for the entire chain.
* that satisfies SnapshotDirty. This is necessary because we
* have just a single index entry for the entire chain.
*/
if (heap_hot_search(&htid, heapRel, &SnapshotDirty, &all_dead))
{
@ -267,15 +267,16 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* is itself now committed dead --- if so, don't complain.
* This is a waste of time in normal scenarios but we must
* do it to support CREATE INDEX CONCURRENTLY.
*
*
* We must follow HOT-chains here because during
* concurrent index build, we insert the root TID though
* the actual tuple may be somewhere in the HOT-chain.
* While following the chain we might not stop at the exact
* tuple which triggered the insert, but that's OK because
* if we find a live tuple anywhere in this chain, we have
* a unique key conflict. The other live tuple is not part
* of this chain because it had a different index entry.
* While following the chain we might not stop at the
* exact tuple which triggered the insert, but that's OK
* because if we find a live tuple anywhere in this chain,
* we have a unique key conflict. The other live tuple is
* not part of this chain because it had a different index
* entry.
*/
htid = itup->t_tid;
if (heap_hot_search(&htid, heapRel, SnapshotSelf, NULL))
@ -293,8 +294,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
ereport(ERROR,
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel))));
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel))));
}
else if (all_dead)
{
@ -372,7 +373,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* On entry, *buf and *offsetptr point to the first legal position
* where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search the right
* InvalidOffsetNumber, in which case the function will search the right
* location within the page if needed. On exit, they point to the chosen
* insert location. If findinsertloc decided to move right, the lock and
* pin on the original page will be released and the new page returned to
@ -389,11 +390,12 @@ _bt_findinsertloc(Relation rel,
ScanKey scankey,
IndexTuple newtup)
{
Buffer buf = *bufptr;
Page page = BufferGetPage(buf);
Size itemsz;
Buffer buf = *bufptr;
Page page = BufferGetPage(buf);
Size itemsz;
BTPageOpaque lpageop;
bool movedright, vacuumed;
bool movedright,
vacuumed;
OffsetNumber newitemoff;
OffsetNumber firstlegaloff = *offsetptr;
@ -447,19 +449,21 @@ _bt_findinsertloc(Relation rel,
Buffer rbuf;
/*
* before considering moving right, see if we can obtain enough
* space by erasing LP_DEAD items
* before considering moving right, see if we can obtain enough space
* by erasing LP_DEAD items
*/
if (P_ISLEAF(lpageop) && P_HAS_GARBAGE(lpageop))
{
_bt_vacuum_one_page(rel, buf);
/* remember that we vacuumed this page, because that makes
* the hint supplied by the caller invalid */
/*
* remember that we vacuumed this page, because that makes the
* hint supplied by the caller invalid
*/
vacuumed = true;
if (PageGetFreeSpace(page) >= itemsz)
break; /* OK, now we have enough space */
break; /* OK, now we have enough space */
}
/*
@ -473,11 +477,10 @@ _bt_findinsertloc(Relation rel,
/*
* step right to next non-dead page
*
* must write-lock that page before releasing write lock on
* current page; else someone else's _bt_check_unique scan could
* fail to see our insertion. write locks on intermediate dead
* pages won't do because we don't know when they will get
* de-linked from the tree.
* must write-lock that page before releasing write lock on current
* page; else someone else's _bt_check_unique scan could fail to see
* our insertion. write locks on intermediate dead pages won't do
* because we don't know when they will get de-linked from the tree.
*/
rbuf = InvalidBuffer;
@ -501,17 +504,16 @@ _bt_findinsertloc(Relation rel,
}
/*
* Now we are on the right page, so find the insert position. If we
* moved right at all, we know we should insert at the start of the
* page. If we didn't move right, we can use the firstlegaloff hint
* if the caller supplied one, unless we vacuumed the page which
* might have moved tuples around making the hint invalid. If we
* didn't move right or can't use the hint, find the position
* by searching.
* Now we are on the right page, so find the insert position. If we moved
* right at all, we know we should insert at the start of the page. If we
* didn't move right, we can use the firstlegaloff hint if the caller
* supplied one, unless we vacuumed the page which might have moved tuples
* around making the hint invalid. If we didn't move right or can't use
* the hint, find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
else if(firstlegaloff != InvalidOffsetNumber && !vacuumed)
else if (firstlegaloff != InvalidOffsetNumber && !vacuumed)
newitemoff = firstlegaloff;
else
newitemoff = _bt_binsrch(rel, buf, keysz, scankey, false);
@ -982,8 +984,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* the data by reinserting it into a new left page. (XXX the latter
* comment is probably obsolete.)
*
* We need to do this before writing the WAL record, so that XLogInsert can
* WAL log an image of the page if necessary.
* We need to do this before writing the WAL record, so that XLogInsert
* can WAL log an image of the page if necessary.
*/
PageRestoreTempPage(leftpage, origpage);
@ -1033,10 +1035,10 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
* Log the new item and its offset, if it was inserted on the left
* page. (If it was put on the right page, we don't need to explicitly
* WAL log it because it's included with all the other items on the
* right page.) Show the new item as belonging to the left page buffer,
* so that it is not stored if XLogInsert decides it needs a full-page
* image of the left page. We store the offset anyway, though, to
* support archive compression of these records.
* right page.) Show the new item as belonging to the left page
* buffer, so that it is not stored if XLogInsert decides it needs a
* full-page image of the left page. We store the offset anyway,
* though, to support archive compression of these records.
*/
if (newitemonleft)
{
@ -1052,31 +1054,31 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = (char *) newitem;
lastrdata->len = MAXALIGN(newitemsz);
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
else
{
/*
* Although we don't need to WAL-log the new item, we still
* need XLogInsert to consider storing a full-page image of the
* left page, so make an empty entry referencing that buffer.
* This also ensures that the left page is always backup block 1.
* Although we don't need to WAL-log the new item, we still need
* XLogInsert to consider storing a full-page image of the left
* page, so make an empty entry referencing that buffer. This also
* ensures that the left page is always backup block 1.
*/
lastrdata->next = lastrdata + 1;
lastrdata++;
lastrdata->data = NULL;
lastrdata->len = 0;
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer = buf; /* backup block 1 */
lastrdata->buffer_std = true;
}
/*
* Log the contents of the right page in the format understood by
* _bt_restore_page(). We set lastrdata->buffer to InvalidBuffer,
* because we're going to recreate the whole page anyway, so it
* should never be stored by XLogInsert.
* because we're going to recreate the whole page anyway, so it should
* never be stored by XLogInsert.
*
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
@ -1101,7 +1103,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
lastrdata->data = NULL;
lastrdata->len = 0;
lastrdata->buffer = sbuf; /* backup block 2 */
lastrdata->buffer = sbuf; /* backup block 2 */
lastrdata->buffer_std = true;
}
@ -1275,9 +1277,10 @@ _bt_findsplitloc(Relation rel,
olddataitemstoleft += itemsz;
}
/* If the new item goes as the last item, check for splitting so that
* all the old items go to the left page and the new item goes to the
* right page.
/*
* If the new item goes as the last item, check for splitting so that all
* the old items go to the left page and the new item goes to the right
* page.
*/
if (newitemoff > maxoff && !goodenoughfound)
_bt_checksplitloc(&state, newitemoff, false, olddataitemstotal, 0);
@ -1314,16 +1317,16 @@ _bt_checksplitloc(FindSplitData *state,
int olddataitemstoleft,
Size firstoldonrightsz)
{
int leftfree,
rightfree;
Size firstrightitemsz;
bool newitemisfirstonright;
int leftfree,
rightfree;
Size firstrightitemsz;
bool newitemisfirstonright;
/* Is the new item going to be the first item on the right page? */
newitemisfirstonright = (firstoldonright == state->newitemoff
&& !newitemonleft);
if(newitemisfirstonright)
if (newitemisfirstonright)
firstrightitemsz = state->newitemsz;
else
firstrightitemsz = firstoldonrightsz;
@ -1334,9 +1337,8 @@ _bt_checksplitloc(FindSplitData *state,
(state->olddataitemstotal - olddataitemstoleft);
/*
* The first item on the right page becomes the high key of the
* left page; therefore it counts against left space as well as right
* space.
* The first item on the right page becomes the high key of the left page;
* therefore it counts against left space as well as right space.
*/
leftfree -= firstrightitemsz;
@ -1875,8 +1877,8 @@ _bt_vacuum_one_page(Relation rel, Buffer buffer)
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
* Scan over all items to see which ones need to be deleted
* according to LP_DEAD flags.
* Scan over all items to see which ones need to be deleted according to
* LP_DEAD flags.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
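The reflowed comment in _bt_checksplitloc() above notes that the first right-page item doubles as the left page's high key and therefore counts against both halves. A stand-alone sketch of that accounting with made-up sizes (check_split_loc() is illustrative; the real function also handles the case where the new item is itself first on the right, plus fill-factor goals):

/*
 * Sketch of _bt_checksplitloc()'s free-space accounting for one
 * candidate split point.
 */
#include <stdio.h>

typedef struct
{
	int			leftspace;		/* usable bytes on the left page */
	int			rightspace;		/* usable bytes on the right page */
	int			olddataitemstotal;	/* bytes of pre-existing items */
	int			newitemsz;		/* size of the incoming item */
} SplitState;

/* Returns 1 if both halves fit with the new item placed as specified. */
static int
check_split_loc(const SplitState *state, int newitemonleft,
				int olddataitemstoleft, int firstrightitemsz)
{
	int			leftfree = state->leftspace - olddataitemstoleft;
	int			rightfree = state->rightspace -
	(state->olddataitemstotal - olddataitemstoleft);

	/* first right item becomes the left page's high key: charge left too */
	leftfree -= firstrightitemsz;

	if (newitemonleft)
		leftfree -= state->newitemsz;
	else
		rightfree -= state->newitemsz;

	return leftfree >= 0 && rightfree >= 0;
}

int
main(void)
{
	SplitState	state = {8192, 8192, 9000, 400};

	/* split with 4600 bytes going left, 200-byte first right item */
	printf("new item on left fits:  %d\n",
		   check_split_loc(&state, 1, 4600, 200));
	printf("new item on right fits: %d\n",
		   check_split_loc(&state, 0, 4600, 200));
	return 0;
}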
@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.103 2007/09/12 22:10:26 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.104 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -751,8 +751,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
/*
* In recovery mode, assume the deletion being replayed is valid. We
* can't always check it because we won't have a full search stack,
* and we should complain if there's a problem, anyway.
* can't always check it because we won't have a full search stack, and we
* should complain if there's a problem, anyway.
*/
if (InRecovery)
return true;
@ -781,8 +781,8 @@ _bt_parent_deletion_safe(Relation rel, BlockNumber target, BTStack stack)
{
/*
* It's only child, so safe if parent would itself be removable.
* We have to check the parent itself, and then recurse to
* test the conditions at the parent's parent.
* We have to check the parent itself, and then recurse to test
* the conditions at the parent's parent.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque))
{
@ -887,18 +887,18 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));
/*
* To avoid deadlocks, we'd better drop the target page lock before
* going further.
* To avoid deadlocks, we'd better drop the target page lock before going
* further.
*/
_bt_relbuf(rel, buf);
/*
* We need an approximate pointer to the page's parent page. We use
* the standard search mechanism to search for the page's high key; this
* will give us a link to either the current parent or someplace to its
* left (if there are multiple equal high keys). In recursion cases,
* the caller already generated a search stack and we can just re-use
* that work.
* We need an approximate pointer to the page's parent page. We use the
* standard search mechanism to search for the page's high key; this will
* give us a link to either the current parent or someplace to its left
* (if there are multiple equal high keys). In recursion cases, the
* caller already generated a search stack and we can just re-use that
* work.
*/
if (stack == NULL)
{
@ -933,11 +933,11 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* During WAL recovery, we can't use _bt_search (for one reason,
* it might invoke user-defined comparison functions that expect
* facilities not available in recovery mode). Instead, just
* set up a dummy stack pointing to the left end of the parent
* tree level, from which _bt_getstackbuf will walk right to the
* parent page. Painful, but we don't care too much about
* performance in this scenario.
* facilities not available in recovery mode). Instead, just set
* up a dummy stack pointing to the left end of the parent tree
* level, from which _bt_getstackbuf will walk right to the parent
* page. Painful, but we don't care too much about performance in
* this scenario.
*/
pbuf = _bt_get_endpoint(rel, targetlevel + 1, false);
stack = (BTStack) palloc(sizeof(BTStackData));
@ -951,10 +951,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
/*
* We cannot delete a page that is the rightmost child of its immediate
* parent, unless it is the only child --- in which case the parent has
* to be deleted too, and the same condition applies recursively to it.
* We have to check this condition all the way up before trying to delete.
* We don't need to re-test when deleting a non-leaf page, though.
* parent, unless it is the only child --- in which case the parent has to
* be deleted too, and the same condition applies recursively to it. We
* have to check this condition all the way up before trying to delete. We
* don't need to re-test when deleting a non-leaf page, though.
*/
if (targetlevel == 0 &&
!_bt_parent_deletion_safe(rel, target, stack))
@ -1072,8 +1072,8 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
* might be possible to push the fast root even further down, but the odds
* of doing so are slim, and the locking considerations daunting.)
*
* We don't support handling this in the case where the parent is
* becoming half-dead, even though it theoretically could occur.
* We don't support handling this in the case where the parent is becoming
* half-dead, even though it theoretically could occur.
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@ -1287,10 +1287,10 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack, bool vacuum_full)
_bt_relbuf(rel, lbuf);
/*
* If parent became half dead, recurse to delete it. Otherwise, if
* right sibling is empty and is now the last child of the parent, recurse
* to try to delete it. (These cases cannot apply at the same time,
* though the second case might itself recurse to the first.)
* If parent became half dead, recurse to delete it. Otherwise, if right
* sibling is empty and is now the last child of the parent, recurse to
* try to delete it. (These cases cannot apply at the same time, though
* the second case might itself recurse to the first.)
*
* When recursing to parent, we hold the lock on the target page until
* done. This delays any insertions into the keyspace that was just
@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.113 2007/05/27 03:50:39 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.114 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -637,17 +637,17 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* even if the row comparison is of ">" or "<" type, because the
* condition applied to all but the last row member is effectively
* ">=" or "<=", and so the extra keys don't break the positioning
* scheme. But, by the same token, if we aren't able to use all
* scheme. But, by the same token, if we aren't able to use all
* the row members, then the part of the row comparison that we
* did use has to be treated as just a ">=" or "<=" condition,
* and so we'd better adjust strat_total accordingly.
* did use has to be treated as just a ">=" or "<=" condition, and
* so we'd better adjust strat_total accordingly.
*/
if (i == keysCount - 1)
{
bool used_all_subkeys = false;
Assert(!(subkey->sk_flags & SK_ROW_END));
for(;;)
for (;;)
{
subkey++;
Assert(subkey->sk_flags & SK_ROW_MEMBER);
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.86 2007/09/12 22:10:26 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.87 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -205,7 +205,7 @@ _bt_freestack(BTStack stack)
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
* we cannot eliminate either. If there are two such keys of the same
* we cannot eliminate either. If there are two such keys of the same
* operator strategy, the second one is just pushed into the output array
* without further processing here. We may also emit both >/>= or both
* </<= keys if we can't compare them. The logic about required keys still
@ -265,13 +265,13 @@ _bt_preprocess_keys(IndexScanDesc scan)
{
/*
* We treat all btree operators as strict (even if they're not so
* marked in pg_proc). This means that it is impossible for an
* operator condition with a NULL comparison constant to succeed,
* and we can reject it right away.
* marked in pg_proc). This means that it is impossible for an
* operator condition with a NULL comparison constant to succeed, and
* we can reject it right away.
*
* However, we now also support "x IS NULL" clauses as search
* conditions, so in that case keep going. The planner has not
* filled in any particular strategy in this case, so set it to
* conditions, so in that case keep going. The planner has not filled
* in any particular strategy in this case, so set it to
* BTEqualStrategyNumber --- we can treat IS NULL as an equality
* operator for purposes of search strategy.
*/
@ -303,8 +303,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
/*
* Initialize for processing of keys for attr 1.
*
* xform[i] points to the currently best scan key of strategy type i+1;
* it is NULL if we haven't yet found such a key for this attr.
* xform[i] points to the currently best scan key of strategy type i+1; it
* is NULL if we haven't yet found such a key for this attr.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
@ -464,6 +464,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
memcpy(outkey, cur, sizeof(ScanKeyData));
if (numberOfEqualCols == attno - 1)
_bt_mark_scankey_required(outkey);
/*
* We don't support RowCompare using equality; such a qual would
* mess up the numberOfEqualCols tracking.
@ -514,9 +515,9 @@ _bt_preprocess_keys(IndexScanDesc scan)
else
{
/*
* We can't determine which key is more restrictive. Keep
* the previous one in xform[j] and push this one directly
* to the output array.
* We can't determine which key is more restrictive. Keep the
* previous one in xform[j] and push this one directly to the
* output array.
*/
ScanKey outkey = &outkeys[new_numberOfKeys++];
@ -542,7 +543,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
* and amoplefttype/amoprighttype equal to the two argument datatypes.
*
* If the opfamily doesn't supply a complete set of cross-type operators we
* may not be able to make the comparison. If we can make the comparison
* may not be able to make the comparison. If we can make the comparison
* we store the operator result in *result and return TRUE. We return FALSE
* if the comparison could not be made.
*
@ -608,8 +609,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
* indexscan initiated by syscache lookup will use cross-data-type
* operators.)
*
* If the sk_strategy was flipped by _bt_mark_scankey_with_indoption,
* we have to un-flip it to get the correct opfamily member.
* If the sk_strategy was flipped by _bt_mark_scankey_with_indoption, we
* have to un-flip it to get the correct opfamily member.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_DESC)
@ -654,7 +655,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
static void
_bt_mark_scankey_with_indoption(ScanKey skey, int16 *indoption)
{
int addflags;
int addflags;
addflags = indoption[skey->sk_attno - 1] << SK_BT_INDOPTION_SHIFT;
if ((addflags & SK_BT_DESC) && !(skey->sk_flags & SK_BT_DESC))
@ -874,8 +875,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
* one of the "must match" subset. On a forward scan,
* index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQBKWD) &&
@ -887,8 +888,8 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((key->sk_flags & SK_BT_REQFWD) &&
@ -978,7 +979,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual is
* one of the "must match" subset. On a forward scan,
* one of the "must match" subset. On a forward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQBKWD) &&
@ -991,7 +992,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
* one of the "must match" subset. On a backward scan,
* one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
@ -1264,8 +1265,8 @@ _bt_start_vacuum(Relation rel)
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/*
* Assign the next cycle ID, being careful to avoid zero as well as
* the reserved high values.
* Assign the next cycle ID, being careful to avoid zero as well as the
* reserved high values.
*/
result = ++(btvacinfo->cycle_ctr);
if (result == 0 || result > MAX_BT_CYCLE_ID)
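_bt_preprocess_keys() above keeps only the most restrictive key per strategy when the operators are comparable. A deliberately reduced sketch for a single integer attribute with only < and > quals (the real code also handles =, cross-type comparisons, redundant-key output, and required-key marking):

/*
 * Toy version of the key-elimination idea: boil a list of comparison
 * quals on one attribute down to one lower and one upper bound.
 */
#include <stdio.h>
#include <limits.h>

typedef enum { LT, GT } Strategy;

typedef struct
{
	Strategy	strat;
	int			arg;
} ScanKeyToy;

int
main(void)
{
	ScanKeyToy	keys[] = {{GT, 3}, {LT, 40}, {GT, 7}, {LT, 25}};
	int			nkeys = 4;
	int			lower = INT_MIN;	/* tightest "x > lower" so far */
	int			upper = INT_MAX;	/* tightest "x < upper" so far */
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		if (keys[i].strat == GT && keys[i].arg > lower)
			lower = keys[i].arg;	/* more restrictive lower bound */
		if (keys[i].strat == LT && keys[i].arg < upper)
			upper = keys[i].arg;	/* more restrictive upper bound */
	}

	/* x > 7 AND x < 25 subsumes all four original quals */
	printf("reduced to: x > %d AND x < %d\n", lower, upper);
	return 0;
}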
@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.46 2007/09/20 17:56:30 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.47 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -40,7 +40,7 @@ typedef struct bt_incomplete_action
BlockNumber rightblk; /* right half of split */
/* these fields are for a delete: */
BlockNumber delblk; /* parent block to be deleted */
} bt_incomplete_action;
} bt_incomplete_action;
static List *incomplete_actions;
@ -271,8 +271,8 @@ btree_xlog_split(bool onleft, bool isroot,
char *datapos;
int datalen;
OffsetNumber newitemoff = 0;
Item newitem = NULL;
Size newitemsz = 0;
Item newitem = NULL;
Size newitemsz = 0;
reln = XLogOpenRelation(xlrec->node);
@ -343,15 +343,15 @@ btree_xlog_split(bool onleft, bool isroot,
* Reconstruct left (original) sibling if needed. Note that this code
* ensures that the items remaining on the left page are in the correct
* item number order, but it does not reproduce the physical order they
* would have had. Is this worth changing? See also _bt_restore_page().
* would have had. Is this worth changing? See also _bt_restore_page().
*/
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
Buffer lbuf = XLogReadBuffer(reln, xlrec->leftsib, false);
if (BufferIsValid(lbuf))
{
Page lpage = (Page) BufferGetPage(lbuf);
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
if (!XLByteLE(lsn, PageGetLSN(lpage)))
@ -359,19 +359,20 @@ btree_xlog_split(bool onleft, bool isroot,
OffsetNumber off;
OffsetNumber maxoff = PageGetMaxOffsetNumber(lpage);
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable = 0;
ItemId hiItemId;
Item hiItem;
int ndeletable = 0;
ItemId hiItemId;
Item hiItem;
/*
* Remove the items from the left page that were copied to
* the right page. Also remove the old high key, if any.
* (We must remove everything before trying to insert any
* items, else we risk not having enough space.)
* Remove the items from the left page that were copied to the
* right page. Also remove the old high key, if any. (We must
* remove everything before trying to insert any items, else
* we risk not having enough space.)
*/
if (!P_RIGHTMOST(lopaque))
{
deletable[ndeletable++] = P_HIKEY;
/*
* newitemoff is given to us relative to the original
* page's item numbering, so adjust it for this deletion.
@ -421,11 +422,11 @@ btree_xlog_split(bool onleft, bool isroot,
/* Fix left-link of the page to the right of the new right sibling */
if (xlrec->rnext != P_NONE && !(record->xl_info & XLR_BKP_BLOCK_2))
{
Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
Buffer buffer = XLogReadBuffer(reln, xlrec->rnext, false);
if (BufferIsValid(buffer))
{
Page page = (Page) BufferGetPage(buffer);
Page page = (Page) BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@ -795,7 +796,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@ -807,7 +808,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@ -819,7 +820,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@ -831,7 +832,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec)
xlrec->node.spcNode, xlrec->node.dbNode,
xlrec->node.relNode);
appendStringInfo(buf, "left %u, right %u, next %u, level %u, firstright %d",
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->leftsib, xlrec->rightsib, xlrec->rnext,
xlrec->level, xlrec->firstright);
break;
}
@ -14,19 +14,19 @@
* CLOG page is initialized to zeroes. Other writes of CLOG come from
* recording of transaction commit or abort in xact.c, which generates its
* own XLOG records for these events and will re-perform the status update
* on redo; so we need make no additional XLOG entry here. For synchronous
* on redo; so we need make no additional XLOG entry here. For synchronous
* transaction commits, the XLOG is guaranteed flushed through the XLOG commit
* record before we are called to log a commit, so the WAL rule "write xlog
* before data" is satisfied automatically. However, for async commits we
* must track the latest LSN affecting each CLOG page, so that we can flush
* XLOG that far and satisfy the WAL rule. We don't have to worry about this
* XLOG that far and satisfy the WAL rule. We don't have to worry about this
* for aborts (whether sync or async), since the post-crash assumption would
* be that such transactions failed anyway.
*
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.44 2007/09/05 18:10:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.45 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -60,8 +60,8 @@
#define TransactionIdToBIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_BYTE)
/* We store the latest async LSN for each group of transactions */
#define CLOG_XACTS_PER_LSN_GROUP 32 /* keep this a power of 2 */
#define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)
#define CLOG_XACTS_PER_LSN_GROUP 32 /* keep this a power of 2 */
#define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)
#define GetLSNIndex(slotno, xid) ((slotno) * CLOG_LSNS_PER_PAGE + \
((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_LSN_GROUP)
@ -85,7 +85,7 @@ static void WriteTruncateXlogRec(int pageno);
* Record the final state of a transaction in the commit log.
*
* lsn must be the WAL location of the commit record when recording an async
* commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* commit. For a synchronous commit it can be InvalidXLogRecPtr, since the
* caller guarantees the commit record is already flushed in that case. It
* should be InvalidXLogRecPtr for abort cases, too.
*
@ -159,7 +159,7 @@ TransactionIdSetStatus(TransactionId xid, XidStatus status, XLogRecPtr lsn)
* an LSN that is late enough to be able to guarantee that if we flush up to
* that LSN then we will have flushed the transaction's commit record to disk.
* The result is not necessarily the exact LSN of the transaction's commit
* record! For example, for long-past transactions (those whose clog pages
* record! For example, for long-past transactions (those whose clog pages
* already migrated to disk), we'll return InvalidXLogRecPtr. Also, because
* we group transactions on the same clog page to conserve storage, we might
* return the LSN of a later transaction that falls into the same group.
@ -486,8 +486,8 @@ clog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&pageno, XLogRecGetData(record), sizeof(int));
/*
* During XLOG replay, latest_page_number isn't set up yet; insert
* a suitable value to bypass the sanity test in SimpleLruTruncate.
* During XLOG replay, latest_page_number isn't set up yet; insert a
* suitable value to bypass the sanity test in SimpleLruTruncate.
*/
ClogCtl->shared->latest_page_number = pageno;
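The macros above fix the clog addressing arithmetic. A worked example with the default 8 kB BLCKSZ (4 transactions per byte, 32768 per page, one stored LSN per group of 32), printing where a given xid's status bits and async-commit LSN slot land:

/*
 * Worked example of the clog addressing arithmetic; stand-alone,
 * printf only.
 */
#include <stdio.h>

#define CLOG_BITS_PER_XACT		2
#define CLOG_XACTS_PER_BYTE		4
#define BLCKSZ					8192
#define CLOG_XACTS_PER_PAGE		(BLCKSZ * CLOG_XACTS_PER_BYTE)
#define CLOG_XACTS_PER_LSN_GROUP	32	/* keep this a power of 2 */
#define CLOG_LSNS_PER_PAGE	(CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP)

int
main(void)
{
	unsigned	xid = 100000;
	unsigned	pageno = xid / CLOG_XACTS_PER_PAGE;
	unsigned	byteno = (xid % CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_BYTE;
	unsigned	bshift = (xid % CLOG_XACTS_PER_BYTE) * CLOG_BITS_PER_XACT;
	unsigned	lsngroup = (xid % CLOG_XACTS_PER_PAGE) /
	CLOG_XACTS_PER_LSN_GROUP;

	/* xid 100000: page 3, byte 424, bit shift 0, LSN slot 53 of 1024 */
	printf("xid %u: page %u, byte %u, bit shift %u\n",
		   xid, pageno, byteno, bshift);
	printf("async-commit LSN slot on page: %u of %d\n",
		   lsngroup, CLOG_LSNS_PER_PAGE);
	return 0;
}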
@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.25 2007/09/05 18:10:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.26 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -380,9 +380,9 @@ MultiXactIdIsRunning(MultiXactId multi)
}
/*
* Checking for myself is cheap compared to looking in shared memory,
* so first do the equivalent of MultiXactIdIsCurrent(). This is not
* needed for correctness, it's just a fast path.
* Checking for myself is cheap compared to looking in shared memory, so
* first do the equivalent of MultiXactIdIsCurrent(). This is not needed
* for correctness, it's just a fast path.
*/
for (i = 0; i < nmembers; i++)
{
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.71 2007/09/08 20:31:14 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.72 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@ -440,14 +440,14 @@ TransactionId
TransactionIdLatest(TransactionId mainxid,
int nxids, const TransactionId *xids)
{
TransactionId result;
TransactionId result;
/*
* In practice it is highly likely that the xids[] array is sorted, and
* so we could save some cycles by just taking the last child XID, but
* this probably isn't so performance-critical that it's worth depending
* on that assumption. But just to show we're not totally stupid, scan
* the array back-to-front to avoid useless assignments.
* In practice it is highly likely that the xids[] array is sorted, and so
* we could save some cycles by just taking the last child XID, but this
* probably isn't so performance-critical that it's worth depending on
* that assumption. But just to show we're not totally stupid, scan the
* array back-to-front to avoid useless assignments.
*/
result = mainxid;
while (--nxids >= 0)
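TransactionIdLatest() above scans the xids[] array back to front on the theory that it is very likely sorted. A sketch with plain unsigned ints standing in for TransactionIds (the real comparison, TransactionIdFollows, also handles xid wraparound):

/*
 * Sketch of TransactionIdLatest()'s back-to-front scan.
 */
#include <stdio.h>

static unsigned
latest_xid(unsigned mainxid, int nxids, const unsigned *xids)
{
	unsigned	result = mainxid;

	/*
	 * Scanning from the back means a sorted array settles the answer
	 * with almost no assignments.
	 */
	while (--nxids >= 0)
	{
		if (xids[nxids] > result)	/* stands in for TransactionIdFollows */
			result = xids[nxids];
	}
	return result;
}

int
main(void)
{
	unsigned	subxids[] = {101, 105, 230};

	printf("latest: %u\n", latest_xid(100, 3, subxids));	/* 230 */
	return 0;
}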
@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.37 2007/10/24 20:55:36 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.38 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@ -397,15 +397,15 @@ LockGXact(const char *gid, Oid user)
errhint("Must be superuser or the user that prepared the transaction.")));
/*
* Note: it probably would be possible to allow committing from another
* database; but at the moment NOTIFY is known not to work and there
* may be some other issues as well. Hence disallow until someone
* gets motivated to make it work.
* Note: it probably would be possible to allow committing from
* another database; but at the moment NOTIFY is known not to work and
* there may be some other issues as well. Hence disallow until
* someone gets motivated to make it work.
*/
if (MyDatabaseId != gxact->proc.databaseId)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("prepared transaction belongs to another database"),
errmsg("prepared transaction belongs to another database"),
errhint("Connect to the database where the transaction was prepared to finish it.")));
/* OK for me to lock it */
@ -937,11 +937,11 @@ EndPrepare(GlobalTransaction gxact)
* odds of a PANIC actually occurring should be very tiny given that we
* were able to write the bogus CRC above.
*
* We have to set inCommit here, too; otherwise a checkpoint
* starting immediately after the WAL record is inserted could complete
* without fsync'ing our state file. (This is essentially the same kind
* of race condition as the COMMIT-to-clog-write case that
* RecordTransactionCommit uses inCommit for; see notes there.)
* We have to set inCommit here, too; otherwise a checkpoint starting
* immediately after the WAL record is inserted could complete without
* fsync'ing our state file. (This is essentially the same kind of race
* condition as the COMMIT-to-clog-write case that RecordTransactionCommit
* uses inCommit for; see notes there.)
*
* We save the PREPARE record's location in the gxact for later use by
* CheckPointTwoPhase.
@ -985,8 +985,8 @@ EndPrepare(GlobalTransaction gxact)
MarkAsPrepared(gxact);
/*
* Now we can mark ourselves as out of the commit critical section:
* a checkpoint starting after this will certainly see the gxact as a
* Now we can mark ourselves as out of the commit critical section: a
* checkpoint starting after this will certainly see the gxact as a
* candidate for fsyncing.
*/
MyProc->inCommit = false;
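The race these two hunks guard against comes down to an ordering constraint, sketched below with stub helpers (insert_prepare_record(), fsync_state_file()) in place of the real WAL and state-file routines:

#include <stdbool.h>

static volatile bool in_commit;             /* stand-in for MyProc->inCommit */

static void insert_prepare_record(void) {}  /* stub: WAL insert */
static void fsync_state_file(void)      {}  /* stub: flush the 2PC state file */

static void
end_prepare_sketch(void)
{
    in_commit = true;       /* a checkpoint starting now must wait for us */
    insert_prepare_record();
    fsync_state_file();
    in_commit = false;      /* safe: our state file is durable */
}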
@ -1272,8 +1272,8 @@ RemoveTwoPhaseFile(TransactionId xid, bool giveWarning)
if (errno != ENOENT || giveWarning)
ereport(WARNING,
(errcode_for_file_access(),
errmsg("could not remove two-phase state file \"%s\": %m",
path)));
errmsg("could not remove two-phase state file \"%s\": %m",
path)));
}
/*
@ -1500,8 +1500,8 @@ PrescanPreparedTransactions(void)
if (buf == NULL)
{
ereport(WARNING,
(errmsg("removing corrupt two-phase state file \"%s\"",
clde->d_name)));
(errmsg("removing corrupt two-phase state file \"%s\"",
clde->d_name)));
RemoveTwoPhaseFile(xid, true);
continue;
}
@ -1511,8 +1511,8 @@ PrescanPreparedTransactions(void)
if (!TransactionIdEquals(hdr->xid, xid))
{
ereport(WARNING,
(errmsg("removing corrupt two-phase state file \"%s\"",
clde->d_name)));
(errmsg("removing corrupt two-phase state file \"%s\"",
clde->d_name)));
RemoveTwoPhaseFile(xid, true);
pfree(buf);
continue;
@ -1599,8 +1599,8 @@ RecoverPreparedTransactions(void)
if (buf == NULL)
{
ereport(WARNING,
(errmsg("removing corrupt two-phase state file \"%s\"",
clde->d_name)));
(errmsg("removing corrupt two-phase state file \"%s\"",
clde->d_name)));
RemoveTwoPhaseFile(xid, true);
continue;
}
@ -1711,9 +1711,9 @@ RecordTransactionCommitPrepared(TransactionId xid,
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT_PREPARED, rdata);
/*
* We don't currently try to sleep before flush here ... nor is there
* any support for async commit of a prepared xact (the very idea is
* probably a contradiction)
* We don't currently try to sleep before flush here ... nor is there any
* support for async commit of a prepared xact (the very idea is probably
* a contradiction)
*/
/* Flush XLOG to disk */


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.5 2007/05/27 03:50:39 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/twophase_rmgr.c,v 1.6 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -38,7 +38,7 @@ const TwoPhaseCallback twophase_postcommit_callbacks[TWOPHASE_RM_MAX_ID + 1] =
lock_twophase_postcommit, /* Lock */
inval_twophase_postcommit, /* Inval */
flatfile_twophase_postcommit, /* flat file update */
notify_twophase_postcommit, /* notify/listen */
notify_twophase_postcommit, /* notify/listen */
pgstat_twophase_postcommit /* pgstat */
};


@ -6,7 +6,7 @@
* Copyright (c) 2000-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.79 2007/09/08 20:31:14 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.80 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -73,9 +73,9 @@ GetNewTransactionId(bool isSubXact)
TransactionIdIsValid(ShmemVariableCache->xidVacLimit))
{
/*
* To avoid swamping the postmaster with signals, we issue the
* autovac request only once per 64K transaction starts. This
* still gives plenty of chances before we get into real trouble.
* To avoid swamping the postmaster with signals, we issue the autovac
* request only once per 64K transaction starts. This still gives
* plenty of chances before we get into real trouble.
*/
if (IsUnderPostmaster && (xid % 65536) == 0)
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
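The once-per-64K throttle is a plain modulus test on the freshly assigned XID; a self-contained sketch, with request_autovacuum() standing in for the postmaster signal:

#include <stdio.h>

static void
request_autovacuum(void)
{
    puts("autovacuum launcher requested");  /* stand-in for the real signal */
}

static void
maybe_signal_autovac(unsigned int xid)
{
    /* fire once per 65536 XID assignments, not on every one */
    if ((xid % 65536) == 0)
        request_autovacuum();
}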
@ -119,9 +119,9 @@ GetNewTransactionId(bool isSubXact)
/*
* We must store the new XID into the shared ProcArray before releasing
* XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential
* for correct OldestXmin tracking; see src/backend/access/transam/README.
* XidGenLock. This ensures that every active XID older than
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
* XXX by storing xid into MyProc without acquiring ProcArrayLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
@ -249,18 +249,18 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid,
xidWarnLimit -= FirstNormalTransactionId;
/*
* We'll start trying to force autovacuums when oldest_datfrozenxid
* gets to be more than autovacuum_freeze_max_age transactions old.
* We'll start trying to force autovacuums when oldest_datfrozenxid gets
* to be more than autovacuum_freeze_max_age transactions old.
*
* Note: guc.c ensures that autovacuum_freeze_max_age is in a sane
* range, so that xidVacLimit will be well before xidWarnLimit.
* Note: guc.c ensures that autovacuum_freeze_max_age is in a sane range,
* so that xidVacLimit will be well before xidWarnLimit.
*
* Note: autovacuum_freeze_max_age is a PGC_POSTMASTER parameter so that
* we don't have to worry about dealing with on-the-fly changes in its
* value. It doesn't look practical to update shared state from a GUC
* assign hook (too many processes would try to execute the hook,
* resulting in race conditions as well as crashes of those not
* connected to shared memory). Perhaps this can be improved someday.
* resulting in race conditions as well as crashes of those not connected
* to shared memory). Perhaps this can be improved someday.
*/
xidVacLimit = oldest_datfrozenxid + autovacuum_freeze_max_age;
if (xidVacLimit < FirstNormalTransactionId)
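Because TransactionId arithmetic wraps around, the computed limit can land among the reserved XIDs below 3. The hunk truncates after the test, so the adjustment below is an assumption about what follows, with simplified constants:

typedef unsigned int TransactionId;

#define FirstNormalTransactionId ((TransactionId) 3)

static TransactionId
compute_xid_vac_limit(TransactionId oldest_datfrozenxid,
                      unsigned int freeze_max_age)
{
    TransactionId limit = oldest_datfrozenxid + freeze_max_age; /* may wrap */

    /* a wrapped result below 3 would collide with the reserved XIDs */
    if (limit < FirstNormalTransactionId)
        limit += FirstNormalTransactionId;

    return limit;
}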


@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.252 2007/11/10 14:36:44 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.253 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -274,8 +274,8 @@ IsTransactionState(void)
TransactionState s = CurrentTransactionState;
/*
* TRANS_DEFAULT and TRANS_ABORT are obviously unsafe states. However,
* we also reject the startup/shutdown states TRANS_START, TRANS_COMMIT,
* TRANS_DEFAULT and TRANS_ABORT are obviously unsafe states. However, we
* also reject the startup/shutdown states TRANS_START, TRANS_COMMIT,
* TRANS_PREPARE since it might be too soon or too late within those
* transition states to do anything interesting. Hence, the only "valid"
* state is TRANS_INPROGRESS.
@ -372,7 +372,7 @@ GetCurrentTransactionIdIfAny(void)
static void
AssignTransactionId(TransactionState s)
{
bool isSubXact = (s->parent != NULL);
bool isSubXact = (s->parent != NULL);
ResourceOwner currentOwner;
/* Assert that caller didn't screw up */
@ -400,9 +400,9 @@ AssignTransactionId(TransactionState s)
SubTransSetParent(s->transactionId, s->parent->transactionId);
/*
* Acquire lock on the transaction XID. (We assume this cannot block.)
* We have to ensure that the lock is assigned to the transaction's
* own ResourceOwner.
* Acquire lock on the transaction XID. (We assume this cannot block.) We
* have to ensure that the lock is assigned to the transaction's own
* ResourceOwner.
*/
currentOwner = CurrentResourceOwner;
PG_TRY();
@ -626,9 +626,9 @@ AtStart_Memory(void)
/*
* If this is the first time through, create a private context for
* AbortTransaction to work in. By reserving some space now, we can
* insulate AbortTransaction from out-of-memory scenarios. Like
* ErrorContext, we set it up with slow growth rate and a nonzero
* minimum size, so that space will be reserved immediately.
* insulate AbortTransaction from out-of-memory scenarios. Like
* ErrorContext, we set it up with slow growth rate and a nonzero minimum
* size, so that space will be reserved immediately.
*/
if (TransactionAbortContext == NULL)
TransactionAbortContext =
@ -749,7 +749,7 @@ AtSubStart_ResourceOwner(void)
* RecordTransactionCommit
*
* Returns latest XID among xact and its children, or InvalidTransactionId
* if the xact has no XID. (We compute that here just because it's easier.)
* if the xact has no XID. (We compute that here just because it's easier.)
*
* This is exported only to support an ugly hack in VACUUM FULL.
*/
@ -757,7 +757,7 @@ TransactionId
RecordTransactionCommit(void)
{
TransactionId xid = GetTopTransactionIdIfAny();
bool markXidCommitted = TransactionIdIsValid(xid);
bool markXidCommitted = TransactionIdIsValid(xid);
TransactionId latestXid = InvalidTransactionId;
int nrels;
RelFileNode *rels;
@ -770,29 +770,29 @@ RecordTransactionCommit(void)
nchildren = xactGetCommittedChildren(&children);
/*
* If we haven't been assigned an XID yet, we neither can, nor do we
* want to write a COMMIT record.
* If we haven't been assigned an XID yet, we neither can, nor do we want
* to write a COMMIT record.
*/
if (!markXidCommitted)
{
/*
* We expect that every smgrscheduleunlink is followed by a catalog
* update, and hence XID assignment, so we shouldn't get here with
* any pending deletes. Use a real test not just an Assert to check
* this, since it's a bit fragile.
* update, and hence XID assignment, so we shouldn't get here with any
* pending deletes. Use a real test not just an Assert to check this,
* since it's a bit fragile.
*/
if (nrels != 0)
elog(ERROR, "cannot commit a transaction that deleted files but has no xid");
/* Can't have child XIDs either; AssignTransactionId enforces this */
Assert(nchildren == 0);
/*
* If we didn't create XLOG entries, we're done here; otherwise we
* should flush those entries the same as a commit record. (An
* should flush those entries the same as a commit record. (An
* example of a possible record that wouldn't cause an XID to be
* assigned is a sequence advance record due to nextval() --- we
* want to flush that to disk before reporting commit.)
* assigned is a sequence advance record due to nextval() --- we want
* to flush that to disk before reporting commit.)
*/
if (XactLastRecEnd.xrecoff == 0)
goto cleanup;
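A hedged distillation of this early-exit rule; flush_wal() is a stub, and the real test is on XactLastRecEnd.xrecoff rather than a boolean:

#include <stdbool.h>

static void flush_wal(void) {}  /* stub: flush XLOG through our last record */

static bool
record_commit_sketch(bool have_xid, bool wrote_any_wal)
{
    if (!have_xid)
    {
        /*
         * No COMMIT record, but flush whatever WAL we did write, such as
         * a sequence-advance record from nextval().
         */
        if (wrote_any_wal)
            flush_wal();
        return false;           /* nothing was marked committed */
    }

    /* ... normal path: insert COMMIT record, update clog ... */
    return true;
}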
@ -802,30 +802,29 @@ RecordTransactionCommit(void)
/*
* Begin commit critical section and insert the commit XLOG record.
*/
XLogRecData rdata[3];
int lastrdata = 0;
xl_xact_commit xlrec;
XLogRecData rdata[3];
int lastrdata = 0;
xl_xact_commit xlrec;
/* Tell bufmgr and smgr to prepare for commit */
BufmgrCommit();
/*
* Mark ourselves as within our "commit critical section". This
* Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
* pg_clog. Without this, it is possible for the checkpoint to
* set REDO after the XLOG record but fail to flush the pg_clog
* update to disk, leading to loss of the transaction commit if
* the system crashes a little later.
* pg_clog. Without this, it is possible for the checkpoint to set
* REDO after the XLOG record but fail to flush the pg_clog update to
* disk, leading to loss of the transaction commit if the system
* crashes a little later.
*
* Note: we could, but don't bother to, set this flag in
* RecordTransactionAbort. That's because loss of a transaction
* abort is noncritical; the presumption would be that it aborted,
* anyway.
* RecordTransactionAbort. That's because loss of a transaction abort
* is noncritical; the presumption would be that it aborted, anyway.
*
* It's safe to change the inCommit flag of our own backend
* without holding the ProcArrayLock, since we're the only one
* modifying it. This makes checkpoint's determination of which
* xacts are inCommit a bit fuzzy, but it doesn't matter.
* It's safe to change the inCommit flag of our own backend without
* holding the ProcArrayLock, since we're the only one modifying it.
* This makes checkpoint's determination of which xacts are inCommit a
* bit fuzzy, but it doesn't matter.
*/
START_CRIT_SECTION();
MyProc->inCommit = true;
@ -864,7 +863,7 @@ RecordTransactionCommit(void)
* Check if we want to commit asynchronously. If the user has set
* synchronous_commit = off, and we're not doing cleanup of any non-temp
* rels nor committing any command that wanted to force sync commit, then
* we can defer flushing XLOG. (We must not allow asynchronous commit if
* we can defer flushing XLOG. (We must not allow asynchronous commit if
* there are any non-temp tables to be deleted, because we might delete
* the files before the COMMIT record is flushed to disk. We do allow
* asynchronous commit if all to-be-deleted tables are temporary though,
@ -875,15 +874,14 @@ RecordTransactionCommit(void)
/*
* Synchronous commit case.
*
* Sleep before flush! So we can flush more than one commit
* records per single fsync. (The idea is some other backend
* may do the XLogFlush while we're sleeping. This needs work
* still, because on most Unixen, the minimum select() delay
* is 10msec or more, which is way too long.)
* Sleep before flush! So we can flush more than one commit records
* per single fsync. (The idea is some other backend may do the
* XLogFlush while we're sleeping. This needs work still, because on
* most Unixen, the minimum select() delay is 10msec or more, which is
* way too long.)
*
* We do not sleep if enableFsync is not turned on, nor if
* there are fewer than CommitSiblings other backends with
* active transactions.
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active transactions.
*/
if (CommitDelay > 0 && enableFsync &&
CountActiveBackends() >= CommitSiblings)
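The group-commit idea reads directly as code. A sketch with made-up knob values; the stubs stand in for CountActiveBackends() and XLogFlush():

#include <unistd.h>

static int  commit_delay_us = 10;   /* stand-in for CommitDelay */
static int  commit_siblings = 5;    /* stand-in for CommitSiblings */
static int  fsync_enabled   = 1;    /* stand-in for enableFsync */

static int  count_active_backends(void) { return 6; }  /* stub */
static void xlog_flush(void)            {}             /* stub: fsync WAL */

static void
sync_commit_sketch(void)
{
    /* sleep briefly so another backend's flush may cover our record too */
    if (commit_delay_us > 0 && fsync_enabled &&
        count_active_backends() >= commit_siblings)
        usleep(commit_delay_us);

    xlog_flush();
}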
@ -906,15 +904,15 @@ RecordTransactionCommit(void)
/*
* Asynchronous commit case.
*
* Report the latest async commit LSN, so that
* the WAL writer knows to flush this commit.
* Report the latest async commit LSN, so that the WAL writer knows to
* flush this commit.
*/
XLogSetAsyncCommitLSN(XactLastRecEnd);
/*
* We must not immediately update the CLOG, since we didn't
* flush the XLOG. Instead, we store the LSN up to which
* the XLOG must be flushed before the CLOG may be updated.
* We must not immediately update the CLOG, since we didn't flush the
* XLOG. Instead, we store the LSN up to which the XLOG must be
* flushed before the CLOG may be updated.
*/
if (markXidCommitted)
{
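The async case defers the clog update behind an LSN. A sketch of that rule, with set_clog_commit_lsn() as a hypothetical stand-in for the real bookkeeping:

#include <stdbool.h>
#include <stdint.h>

static uint64_t async_commit_lsn;   /* newest async commit, as an LSN */

/* stub: record the LSN that must be flushed before clog may change */
static void set_clog_commit_lsn(uint64_t lsn) { (void) lsn; }

static void
async_commit_sketch(uint64_t commit_record_end, bool marked_committed)
{
    /* tell the WAL writer how far it must eventually flush */
    if (commit_record_end > async_commit_lsn)
        async_commit_lsn = commit_record_end;

    /* the clog update waits until WAL reaches that LSN */
    if (marked_committed)
        set_clog_commit_lsn(commit_record_end);
}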
@ -925,8 +923,8 @@ RecordTransactionCommit(void)
}
/*
* If we entered a commit critical section, leave it now, and
* let checkpoints proceed.
* If we entered a commit critical section, leave it now, and let
* checkpoints proceed.
*/
if (markXidCommitted)
{
@ -1068,11 +1066,11 @@ RecordSubTransactionCommit(void)
* We do not log the subcommit in XLOG; it doesn't matter until the
* top-level transaction commits.
*
* We must mark the subtransaction subcommitted in the CLOG if
* it had a valid XID assigned. If it did not, nobody else will
* ever know about the existence of this subxact. We don't
* have to deal with deletions scheduled for on-commit here, since
* they'll be reassigned to our parent (who might still abort).
* We must mark the subtransaction subcommitted in the CLOG if it had a
* valid XID assigned. If it did not, nobody else will ever know about
* the existence of this subxact. We don't have to deal with deletions
* scheduled for on-commit here, since they'll be reassigned to our parent
* (who might still abort).
*/
if (TransactionIdIsValid(xid))
{
@ -1095,7 +1093,7 @@ RecordSubTransactionCommit(void)
* RecordTransactionAbort
*
* Returns latest XID among xact and its children, or InvalidTransactionId
* if the xact has no XID. (We compute that here just because it's easier.)
* if the xact has no XID. (We compute that here just because it's easier.)
*/
static TransactionId
RecordTransactionAbort(bool isSubXact)
@ -1106,15 +1104,15 @@ RecordTransactionAbort(bool isSubXact)
RelFileNode *rels;
int nchildren;
TransactionId *children;
XLogRecData rdata[3];
int lastrdata = 0;
xl_xact_abort xlrec;
XLogRecData rdata[3];
int lastrdata = 0;
xl_xact_abort xlrec;
/*
* If we haven't been assigned an XID, nobody will care whether we
* aborted or not. Hence, we're done in that case. It does not matter
* if we have rels to delete (note that this routine is not responsible
* for actually deleting 'em). We cannot have any child XIDs, either.
* If we haven't been assigned an XID, nobody will care whether we aborted
* or not. Hence, we're done in that case. It does not matter if we have
* rels to delete (note that this routine is not responsible for actually
* deleting 'em). We cannot have any child XIDs, either.
*/
if (!TransactionIdIsValid(xid))
{
@ -1128,7 +1126,7 @@ RecordTransactionAbort(bool isSubXact)
* We have a valid XID, so we should write an ABORT record for it.
*
* We do not flush XLOG to disk here, since the default assumption after a
* crash would be that we aborted, anyway. For the same reason, we don't
* crash would be that we aborted, anyway. For the same reason, we don't
* need to worry about interlocking against checkpoint start.
*/
@ -1189,10 +1187,10 @@ RecordTransactionAbort(bool isSubXact)
* having flushed the ABORT record to disk, because in event of a crash
* we'd be assumed to have aborted anyway.
*
* The ordering here isn't critical but it seems best to mark the
* parent first. This assures an atomic transition of all the
* subtransactions to aborted state from the point of view of
* concurrent TransactionIdDidAbort calls.
* The ordering here isn't critical but it seems best to mark the parent
* first. This assures an atomic transition of all the subtransactions to
* aborted state from the point of view of concurrent
* TransactionIdDidAbort calls.
*/
TransactionIdAbort(xid);
TransactionIdAbortTree(nchildren, children);
@ -1231,9 +1229,9 @@ static void
AtAbort_Memory(void)
{
/*
* Switch into TransactionAbortContext, which should have some free
* space even if nothing else does. We'll work in this context until
* we've finished cleaning up.
* Switch into TransactionAbortContext, which should have some free space
* even if nothing else does. We'll work in this context until we've
* finished cleaning up.
*
* It is barely possible to get here when we've not been able to create
* TransactionAbortContext yet; if so use TopMemoryContext.
@ -1438,7 +1436,7 @@ StartTransaction(void)
VirtualXactLockTableInsert(vxid);
/*
* Advertise it in the proc array. We assume assignment of
* Advertise it in the proc array. We assume assignment of
* LocalTransactionID is atomic, and the backendId should be set already.
*/
Assert(MyProc->backendId == vxid.backendId);
@ -1449,8 +1447,8 @@ StartTransaction(void)
/*
* set transaction_timestamp() (a/k/a now()). We want this to be the same
* as the first command's statement_timestamp(), so don't do a fresh
* GetCurrentTimestamp() call (which'd be expensive anyway). Also,
* mark xactStopTimestamp as unset.
* GetCurrentTimestamp() call (which'd be expensive anyway). Also, mark
* xactStopTimestamp as unset.
*/
xactStartTimestamp = stmtStartTimestamp;
xactStopTimestamp = 0;
@ -1576,8 +1574,8 @@ CommitTransaction(void)
PG_TRACE1(transaction__commit, MyProc->lxid);
/*
* Let others know about no transaction in progress by me. Note that
* this must be done _before_ releasing locks we hold and _after_
* Let others know about no transaction in progress by me. Note that this
* must be done _before_ releasing locks we hold and _after_
* RecordTransactionCommit.
*/
ProcArrayEndTransaction(MyProc, latestXid);
@ -2503,7 +2501,7 @@ AbortCurrentTransaction(void)
* inside a function or multi-query querystring. (We will always fail if
* this is false, but it's convenient to centralize the check here instead of
* making callers do it.)
* stmtType: statement type name, for error messages.
* stmtType: statement type name, for error messages.
*/
void
PreventTransactionChain(bool isTopLevel, const char *stmtType)


@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.287 2007/11/15 20:36:40 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.288 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -80,7 +80,7 @@ bool XLOG_DEBUG = false;
* future XLOG segment as long as there aren't already XLOGfileslop future
* segments; else we'll delete it. This could be made a separate GUC
* variable, but at present I think it's sufficient to hardwire it as
* 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free
* no more than 2*CheckPointSegments log segments, and we want to recycle all
* of them; the +1 allows boundary cases to happen without wasting a
* delete/create-segment cycle.
@ -287,7 +287,7 @@ typedef struct XLogCtlData
XLogwrtResult LogwrtResult;
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
/* Protected by WALWriteLock: */
XLogCtlWrite Write;
@ -737,8 +737,8 @@ begin:;
* full-block records into the non-full-block format.
*
* Note: we could just set the flag whenever !forcePageWrites, but
* defining it like this leaves the info bit free for some potential
* other use in records without any backup blocks.
* defining it like this leaves the info bit free for some potential other
* use in records without any backup blocks.
*/
if ((info & XLR_BKP_BLOCK_MASK) && !Insert->forcePageWrites)
info |= XLR_BKP_REMOVABLE;
@ -1345,10 +1345,10 @@ static bool
XLogCheckpointNeeded(void)
{
/*
* A straight computation of segment number could overflow 32
* bits. Rather than assuming we have working 64-bit
* arithmetic, we compare the highest-order bits separately,
* and force a checkpoint immediately when they change.
* A straight computation of segment number could overflow 32 bits.
* Rather than assuming we have working 64-bit arithmetic, we compare the
* highest-order bits separately, and force a checkpoint immediately when
* they change.
*/
uint32 old_segno,
new_segno;
@ -1361,7 +1361,7 @@ XLogCheckpointNeeded(void)
new_segno = (openLogId % XLogSegSize) * XLogSegsPerFile + openLogSeg;
new_highbits = openLogId / XLogSegSize;
if (new_highbits != old_highbits ||
new_segno >= old_segno + (uint32) (CheckPointSegments-1))
new_segno >= old_segno + (uint32) (CheckPointSegments - 1))
return true;
return false;
}
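The high-bits trick is compact enough to show whole. A standalone sketch mirroring the expressions above, with made-up constants in place of XLogSegSize, XLogSegsPerFile and CheckPointSegments:

#include <stdbool.h>
#include <stdint.h>

#define SEG_SIZE      (16u * 1024 * 1024)
#define SEGS_PER_FILE (0xFFFFFFFFu / SEG_SIZE)
#define CKPT_SEGMENTS 3u

static bool
checkpoint_needed(uint32_t old_id, uint32_t old_seg,
                  uint32_t new_id, uint32_t new_seg)
{
    /*
     * Fold (id, seg) into a 32-bit segment number and compare the
     * high-order bits separately, so nothing needs 64-bit arithmetic.
     */
    uint32_t    old_segno = (old_id % SEG_SIZE) * SEGS_PER_FILE + old_seg;
    uint32_t    new_segno = (new_id % SEG_SIZE) * SEGS_PER_FILE + new_seg;
    uint32_t    old_high = old_id / SEG_SIZE;
    uint32_t    new_high = new_id / SEG_SIZE;

    return new_high != old_high ||
           new_segno >= old_segno + (CKPT_SEGMENTS - 1);
}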
@ -1558,9 +1558,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
/*
* Signal bgwriter to start a checkpoint if we've consumed too
* much xlog since the last one. For speed, we first check
* using the local copy of RedoRecPtr, which might be
* out of date; if it looks like a checkpoint is needed,
* forcibly update RedoRecPtr and recheck.
* using the local copy of RedoRecPtr, which might be out of
* date; if it looks like a checkpoint is needed, forcibly
* update RedoRecPtr and recheck.
*/
if (IsUnderPostmaster &&
XLogCheckpointNeeded())
@ -1779,9 +1779,9 @@ XLogFlush(XLogRecPtr record)
* We normally flush only completed blocks; but if there is nothing to do on
* that basis, we check for unflushed async commits in the current incomplete
* block, and flush through the latest one of those. Thus, if async commits
* are not being used, we will flush complete blocks only. We can guarantee
* are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
* one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
* one or two. (We allow XLogWrite to write "flexibly", meaning it can stop
* at the end of the buffer ring; this makes a difference only with very high
* load or long wal_writer_delay, but imposes one extra cycle for the worst
* case for async commits.)
@ -1861,6 +1861,7 @@ void
XLogAsyncCommitFlush(void)
{
XLogRecPtr WriteRqstPtr;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@ -2252,7 +2253,7 @@ InstallXLogFileSegment(uint32 *log, uint32 *seg, char *tmppath,
LWLockRelease(ControlFileLock);
return false;
}
#endif /* WIN32 */
#endif /* WIN32 */
ereport(ERROR,
(errcode_for_file_access(),
@ -2432,8 +2433,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
int rc;
bool signaled;
struct stat stat_buf;
uint32 restartLog;
uint32 restartSeg;
uint32 restartLog;
uint32 restartSeg;
/*
* When doing archive recovery, we always prefer an archived log file even
@ -2511,8 +2512,8 @@ RestoreArchivedFile(char *path, const char *xlogfname,
sp++;
XLByteToSeg(ControlFile->checkPointCopy.redo,
restartLog, restartSeg);
XLogFileName(lastRestartPointFname,
ControlFile->checkPointCopy.ThisTimeLineID,
XLogFileName(lastRestartPointFname,
ControlFile->checkPointCopy.ThisTimeLineID,
restartLog, restartSeg);
StrNCpy(dp, lastRestartPointFname, endp - dp);
dp += strlen(dp);
@ -2594,17 +2595,17 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* incorrectly. We have to assume the former.
*
* However, if the failure was due to any sort of signal, it's best to
* punt and abort recovery. (If we "return false" here, upper levels
* will assume that recovery is complete and start up the database!)
* It's essential to abort on child SIGINT and SIGQUIT, because per spec
* punt and abort recovery. (If we "return false" here, upper levels will
* assume that recovery is complete and start up the database!) It's
* essential to abort on child SIGINT and SIGQUIT, because per spec
* system() ignores SIGINT and SIGQUIT while waiting; if we see one of
* those it's a good bet we should have gotten it too. Aborting on other
* signals such as SIGTERM seems a good idea as well.
*
* Per the Single Unix Spec, shells report exit status > 128 when
* a called command died on a signal. Also, 126 and 127 are used to
* report problems such as an unfindable command; treat those as fatal
* errors too.
* Per the Single Unix Spec, shells report exit status > 128 when a called
* command died on a signal. Also, 126 and 127 are used to report
* problems such as an unfindable command; treat those as fatal errors
* too.
*/
signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
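The exit-status rules above map onto the standard wait macros; a self-contained example (the command is a placeholder, not a real restore_command):

#include <stdbool.h>
#include <stdlib.h>
#include <sys/wait.h>

static bool
restore_command_failed_hard(int rc)
{
    /*
     * Per the Single Unix Spec, >128 means the child died on a signal;
     * 126 and 127 are shell-reported failures treated as fatal too.
     */
    return WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
}

int
main(void)
{
    int         rc = system("true");    /* placeholder command */

    return restore_command_failed_hard(rc) ? EXIT_FAILURE : EXIT_SUCCESS;
}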
@ -3981,8 +3982,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d,"
" but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.",
ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE),
" but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.",
ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE),
errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
@ -4430,7 +4431,7 @@ readRecoveryCommandFile(void)
*/
recoveryTargetTime =
DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
CStringGetDatum(tok2),
CStringGetDatum(tok2),
ObjectIdGetDatum(InvalidOid),
Int32GetDatum(-1)));
ereport(LOG,
@ -4629,7 +4630,7 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
{
bool stopsHere;
uint8 record_info;
TimestampTz recordXtime;
TimestampTz recordXtime;
/* We only consider stopping at COMMIT or ABORT records */
if (record->xl_rmid != RM_XACT_ID)
@ -4781,11 +4782,11 @@ StartupXLOG(void)
(errmsg("database system was interrupted while in recovery at log time %s",
str_time(ControlFile->checkPointCopy.time)),
errhint("If this has occurred more than once some data might be corrupted"
" and you might need to choose an earlier recovery target.")));
" and you might need to choose an earlier recovery target.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted; last known up at %s",
str_time(ControlFile->time))));
(errmsg("database system was interrupted; last known up at %s",
str_time(ControlFile->time))));
/* This is just to allow attaching to startup process with a debugger */
#ifdef XLOG_REPLAY_DELAY
@ -4879,9 +4880,9 @@ StartupXLOG(void)
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
(errmsg("redo record is at %X/%X; shutdown %s",
checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
wasShutdown ? "TRUE" : "FALSE")));
(errmsg("redo record is at %X/%X; shutdown %s",
checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg("next transaction ID: %u/%u; next OID: %u",
checkPoint.nextXidEpoch, checkPoint.nextXid,
@ -4920,7 +4921,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
(errmsg("invalid redo record in shutdown checkpoint")));
(errmsg("invalid redo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@ -5045,7 +5046,7 @@ StartupXLOG(void)
*/
if (recoveryStopsHere(record, &recoveryApply))
{
reachedStopPoint = true; /* see below */
reachedStopPoint = true; /* see below */
recoveryContinue = false;
if (!recoveryApply)
break;
@ -5087,8 +5088,8 @@ StartupXLOG(void)
ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
if (recoveryLastXTime)
ereport(LOG,
(errmsg("last completed transaction was at log time %s",
timestamptz_to_str(recoveryLastXTime))));
(errmsg("last completed transaction was at log time %s",
timestamptz_to_str(recoveryLastXTime))));
InRedo = false;
}
else
@ -5116,7 +5117,7 @@ StartupXLOG(void)
if (reachedStopPoint) /* stopped because of stop request */
ereport(FATAL,
(errmsg("requested recovery stop point is before end time of backup dump")));
else /* ran off end of WAL */
else /* ran off end of WAL */
ereport(FATAL,
(errmsg("WAL ends before end time of backup dump")));
}
@ -5124,12 +5125,12 @@ StartupXLOG(void)
/*
* Consider whether we need to assign a new timeline ID.
*
* If we are doing an archive recovery, we always assign a new ID. This
* handles a couple of issues. If we stopped short of the end of WAL
* If we are doing an archive recovery, we always assign a new ID. This
* handles a couple of issues. If we stopped short of the end of WAL
* during recovery, then we are clearly generating a new timeline and must
* assign it a unique new ID. Even if we ran to the end, modifying the
* current last segment is problematic because it may result in trying
* to overwrite an already-archived copy of that segment, and we encourage
* current last segment is problematic because it may result in trying to
* overwrite an already-archived copy of that segment, and we encourage
* DBAs to make their archive_commands reject that. We can dodge the
* problem by making the new active segment have a new timeline ID.
*
@ -5472,7 +5473,7 @@ GetInsertRecPtr(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
XLogRecPtr recptr;
XLogRecPtr recptr;
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->LogwrtRqst.Write;
@ -5576,8 +5577,12 @@ LogCheckpointStart(int flags)
static void
LogCheckpointEnd(void)
{
long write_secs, sync_secs, total_secs;
int write_usecs, sync_usecs, total_usecs;
long write_secs,
sync_secs,
total_secs;
int write_usecs,
sync_usecs,
total_usecs;
CheckpointStats.ckpt_end_t = GetCurrentTimestamp();
@ -5601,9 +5606,9 @@ LogCheckpointEnd(void)
CheckpointStats.ckpt_segs_added,
CheckpointStats.ckpt_segs_removed,
CheckpointStats.ckpt_segs_recycled,
write_secs, write_usecs/1000,
sync_secs, sync_usecs/1000,
total_secs, total_usecs/1000);
write_secs, write_usecs / 1000,
sync_secs, sync_usecs / 1000,
total_secs, total_usecs / 1000);
}
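The seconds/microseconds split feeding that log line is easy to get wrong around the borrow; a standalone equivalent using struct timeval (the real code works with its own timestamp type):

#include <stdio.h>
#include <sys/time.h>

static void
report_elapsed(struct timeval start, struct timeval end)
{
    long        secs = end.tv_sec - start.tv_sec;
    int         usecs = (int) (end.tv_usec - start.tv_usec);

    /* borrow when the microsecond part goes negative */
    if (usecs < 0)
    {
        secs -= 1;
        usecs += 1000000;
    }

    /* report milliseconds the same way as above: usecs / 1000 */
    printf("elapsed: %ld.%03d s\n", secs, usecs / 1000);
}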
/*
@ -5665,9 +5670,9 @@ CreateCheckPoint(int flags)
}
/*
* Let smgr prepare for checkpoint; this has to happen before we
* determine the REDO pointer. Note that smgr must not do anything
* that'd have to be undone if we decide no checkpoint is needed.
* Let smgr prepare for checkpoint; this has to happen before we determine
* the REDO pointer. Note that smgr must not do anything that'd have to
* be undone if we decide no checkpoint is needed.
*/
smgrpreckpt();
@ -5761,8 +5766,8 @@ CreateCheckPoint(int flags)
LWLockRelease(WALInsertLock);
/*
* If enabled, log checkpoint start. We postpone this until now
* so as not to log anything if we decided to skip the checkpoint.
* If enabled, log checkpoint start. We postpone this until now so as not
* to log anything if we decided to skip the checkpoint.
*/
if (log_checkpoints)
LogCheckpointStart(flags);
@ -5782,11 +5787,11 @@ CreateCheckPoint(int flags)
* checkpoint take a bit longer than to hold locks longer than necessary.
* (In fact, the whole reason we have this issue is that xact.c does
* commit record XLOG insertion and clog update as two separate steps
* protected by different locks, but again that seems best on grounds
* of minimizing lock contention.)
* protected by different locks, but again that seems best on grounds of
* minimizing lock contention.)
*
* A transaction that has not yet set inCommit when we look cannot be
* at risk, since he's not inserted his commit record yet; and one that's
* A transaction that has not yet set inCommit when we look cannot be at
* risk, since he's not inserted his commit record yet; and one that's
* already cleared it is not at risk either, since he's done fixing clog
* and we will correctly flush the update below. So we cannot miss any
* xacts we need to wait for.
@ -5794,8 +5799,9 @@ CreateCheckPoint(int flags)
nInCommit = GetTransactionsInCommit(&inCommitXids);
if (nInCommit > 0)
{
do {
pg_usleep(10000L); /* wait for 10 msec */
do
{
pg_usleep(10000L); /* wait for 10 msec */
} while (HaveTransactionsInCommit(inCommitXids, nInCommit));
}
pfree(inCommitXids);
@ -5946,7 +5952,7 @@ CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
CheckPointCLOG();
CheckPointSUBTRANS();
CheckPointMultiXact();
CheckPointBuffers(flags); /* performs all required fsyncs */
CheckPointBuffers(flags); /* performs all required fsyncs */
/* We deliberately delay 2PC checkpointing as long as possible */
CheckPointTwoPhase(checkPointRedo);
}
@ -6046,14 +6052,14 @@ XLogPutNextOid(Oid nextOid)
* does.
*
* Note, however, that the above statement only covers state "within" the
* database. When we use a generated OID as a file or directory name,
* we are in a sense violating the basic WAL rule, because that filesystem
* database. When we use a generated OID as a file or directory name, we
* are in a sense violating the basic WAL rule, because that filesystem
* change may reach disk before the NEXTOID WAL record does. The impact
* of this is that if a database crash occurs immediately afterward,
* we might after restart re-generate the same OID and find that it
* conflicts with the leftover file or directory. But since for safety's
* sake we always loop until finding a nonconflicting filename, this poses
* no real problem in practice. See pgsql-hackers discussion 27-Sep-2006.
* of this is that if a database crash occurs immediately afterward, we
* might after restart re-generate the same OID and find that it conflicts
* with the leftover file or directory. But since for safety's sake we
* always loop until finding a nonconflicting filename, this poses no real
* problem in practice. See pgsql-hackers discussion 27-Sep-2006.
*/
}
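The "loop until finding a nonconflicting filename" safety net this comment relies on can be sketched with nothing but stat(); get_new_oid() and the directory layout are stand-ins:

#include <stdio.h>
#include <sys/stat.h>

typedef unsigned int Oid;

static Oid  next_oid = 16384;
static Oid  get_new_oid(void) { return next_oid++; }    /* stand-in allocator */

static Oid
choose_relfilenode(const char *dir)
{
    char        path[256];
    struct stat st;
    Oid         oid;

    /* keep generating OIDs while a leftover file occupies the name */
    do
    {
        oid = get_new_oid();
        snprintf(path, sizeof(path), "%s/%u", dir, oid);
    } while (stat(path, &st) == 0);

    return oid;
}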
@ -6673,7 +6679,7 @@ pg_switch_xlog(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to switch transaction log files"))));
(errmsg("must be superuser to switch transaction log files"))));
switchpoint = RequestXLogSwitch();


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.236 2007/08/02 23:39:44 adunstan Exp $
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.237 2007/11/15 21:14:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -205,7 +205,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
{
char *progname = argv[0];
int flag;
AuxProcType auxType = CheckerProcess;
AuxProcType auxType = CheckerProcess;
char *userDoption = NULL;
/*
@ -431,7 +431,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
InitXLOGAccess();
WalWriterMain();
proc_exit(1); /* should never return */
default:
elog(PANIC, "unrecognized process type: %d", auxType);
proc_exit(1);
@ -568,7 +568,7 @@ bootstrap_signals(void)
}
/*
* Begin shutdown of an auxiliary process. This is approximately the equivalent
* Begin shutdown of an auxiliary process. This is approximately the equivalent
* of ShutdownPostgres() in postinit.c. We can't run transactions in an
* auxiliary process, so most of the work of AbortTransaction() is not needed,
* but we do need to make sure we've released any LWLocks we are holding.


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.141 2007/10/12 18:55:11 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.142 2007/11/15 21:14:32 momjian Exp $
*
* NOTES
* See acl.h.
@ -2348,8 +2348,8 @@ pg_ts_config_ownercheck(Oid cfg_oid, Oid roleid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("text search configuration with OID %u does not exist",
cfg_oid)));
errmsg("text search configuration with OID %u does not exist",
cfg_oid)));
ownerId = ((Form_pg_ts_config) GETSTRUCT(tuple))->cfgowner;


@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.67 2007/08/21 01:11:13 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.68 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -85,29 +85,29 @@ typedef struct
* See also getObjectClass().
*/
static const Oid object_classes[MAX_OCLASS] = {
RelationRelationId, /* OCLASS_CLASS */
ProcedureRelationId, /* OCLASS_PROC */
TypeRelationId, /* OCLASS_TYPE */
CastRelationId, /* OCLASS_CAST */
ConstraintRelationId, /* OCLASS_CONSTRAINT */
ConversionRelationId, /* OCLASS_CONVERSION */
AttrDefaultRelationId, /* OCLASS_DEFAULT */
LanguageRelationId, /* OCLASS_LANGUAGE */
OperatorRelationId, /* OCLASS_OPERATOR */
OperatorClassRelationId, /* OCLASS_OPCLASS */
OperatorFamilyRelationId, /* OCLASS_OPFAMILY */
RelationRelationId, /* OCLASS_CLASS */
ProcedureRelationId, /* OCLASS_PROC */
TypeRelationId, /* OCLASS_TYPE */
CastRelationId, /* OCLASS_CAST */
ConstraintRelationId, /* OCLASS_CONSTRAINT */
ConversionRelationId, /* OCLASS_CONVERSION */
AttrDefaultRelationId, /* OCLASS_DEFAULT */
LanguageRelationId, /* OCLASS_LANGUAGE */
OperatorRelationId, /* OCLASS_OPERATOR */
OperatorClassRelationId, /* OCLASS_OPCLASS */
OperatorFamilyRelationId, /* OCLASS_OPFAMILY */
AccessMethodOperatorRelationId, /* OCLASS_AMOP */
AccessMethodProcedureRelationId, /* OCLASS_AMPROC */
RewriteRelationId, /* OCLASS_REWRITE */
TriggerRelationId, /* OCLASS_TRIGGER */
NamespaceRelationId, /* OCLASS_SCHEMA */
TSParserRelationId, /* OCLASS_TSPARSER */
TSDictionaryRelationId, /* OCLASS_TSDICT */
TSTemplateRelationId, /* OCLASS_TSTEMPLATE */
TSConfigRelationId, /* OCLASS_TSCONFIG */
AuthIdRelationId, /* OCLASS_ROLE */
DatabaseRelationId, /* OCLASS_DATABASE */
TableSpaceRelationId /* OCLASS_TBLSPACE */
RewriteRelationId, /* OCLASS_REWRITE */
TriggerRelationId, /* OCLASS_TRIGGER */
NamespaceRelationId, /* OCLASS_SCHEMA */
TSParserRelationId, /* OCLASS_TSPARSER */
TSDictionaryRelationId, /* OCLASS_TSDICT */
TSTemplateRelationId, /* OCLASS_TSTEMPLATE */
TSConfigRelationId, /* OCLASS_TSCONFIG */
AuthIdRelationId, /* OCLASS_ROLE */
DatabaseRelationId, /* OCLASS_DATABASE */
TableSpaceRelationId /* OCLASS_TBLSPACE */
};
@ -1012,7 +1012,7 @@ doDeletion(const ObjectAddress *object)
RemoveTSConfigurationById(object->objectId);
break;
/* OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE not handled */
/* OCLASS_ROLE, OCLASS_DATABASE, OCLASS_TBLSPACE not handled */
default:
elog(ERROR, "unrecognized object class: %u",
@ -2162,7 +2162,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search parser %u",
object->objectId);
appendStringInfo(&buffer, _("text search parser %s"),
NameStr(((Form_pg_ts_parser) GETSTRUCT(tup))->prsname));
NameStr(((Form_pg_ts_parser) GETSTRUCT(tup))->prsname));
ReleaseSysCache(tup);
break;
}
@ -2178,7 +2178,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search dictionary %u",
object->objectId);
appendStringInfo(&buffer, _("text search dictionary %s"),
NameStr(((Form_pg_ts_dict) GETSTRUCT(tup))->dictname));
NameStr(((Form_pg_ts_dict) GETSTRUCT(tup))->dictname));
ReleaseSysCache(tup);
break;
}
@ -2194,7 +2194,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search template %u",
object->objectId);
appendStringInfo(&buffer, _("text search template %s"),
NameStr(((Form_pg_ts_template) GETSTRUCT(tup))->tmplname));
NameStr(((Form_pg_ts_template) GETSTRUCT(tup))->tmplname));
ReleaseSysCache(tup);
break;
}
@ -2210,7 +2210,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for text search configuration %u",
object->objectId);
appendStringInfo(&buffer, _("text search configuration %s"),
NameStr(((Form_pg_ts_config) GETSTRUCT(tup))->cfgname));
NameStr(((Form_pg_ts_config) GETSTRUCT(tup))->cfgname));
ReleaseSysCache(tup);
break;
}


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.325 2007/10/29 19:40:39 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.326 2007/11/15 21:14:33 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -408,7 +408,7 @@ CheckAttributeType(const char *attname, Oid atttypid)
{
/*
* Warn user, but don't fail, if column to be created has UNKNOWN type
* (usually as a result of a 'retrieve into' - jolly)
* (usually as a result of a 'retrieve into' - jolly)
*/
ereport(WARNING,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
@ -418,8 +418,8 @@ CheckAttributeType(const char *attname, Oid atttypid)
else if (att_typtype == TYPTYPE_PSEUDO)
{
/*
* Refuse any attempt to create a pseudo-type column, except for
* a special hack for pg_statistic: allow ANYARRAY during initdb
* Refuse any attempt to create a pseudo-type column, except for a
* special hack for pg_statistic: allow ANYARRAY during initdb
*/
if (atttypid != ANYARRAYOID || IsUnderPostmaster)
ereport(ERROR,
@ -430,13 +430,13 @@ CheckAttributeType(const char *attname, Oid atttypid)
else if (att_typtype == TYPTYPE_COMPOSITE)
{
/*
* For a composite type, recurse into its attributes. You might
* think this isn't necessary, but since we allow system catalogs
* to break the rule, we have to guard against the case.
* For a composite type, recurse into its attributes. You might think
* this isn't necessary, but since we allow system catalogs to break
* the rule, we have to guard against the case.
*/
Relation relation;
TupleDesc tupdesc;
int i;
Relation relation;
TupleDesc tupdesc;
int i;
relation = relation_open(get_typ_typrelid(atttypid), AccessShareLock);
@ -702,17 +702,17 @@ AddNewRelationTuple(Relation pg_class_desc,
{
/*
* Initialize to the minimum XID that could put tuples in the table.
* We know that no xacts older than RecentXmin are still running,
* so that will do.
* We know that no xacts older than RecentXmin are still running, so
* that will do.
*/
new_rel_reltup->relfrozenxid = RecentXmin;
}
else
{
/*
* Other relation types will not contain XIDs, so set relfrozenxid
* to InvalidTransactionId. (Note: a sequence does contain a tuple,
* but we force its xmin to be FrozenTransactionId always; see
* Other relation types will not contain XIDs, so set relfrozenxid to
* InvalidTransactionId. (Note: a sequence does contain a tuple, but
* we force its xmin to be FrozenTransactionId always; see
* commands/sequence.c.)
*/
new_rel_reltup->relfrozenxid = InvalidTransactionId;
@ -740,7 +740,7 @@ AddNewRelationType(const char *typeName,
Oid typeNamespace,
Oid new_rel_oid,
char new_rel_kind,
Oid new_array_type)
Oid new_array_type)
{
return
TypeCreate(InvalidOid, /* no predetermined OID */
@ -760,7 +760,7 @@ AddNewRelationType(const char *typeName,
InvalidOid, /* analyze procedure - default */
InvalidOid, /* array element type - irrelevant */
false, /* this is not an array type */
new_array_type, /* array type if any */
new_array_type, /* array type if any */
InvalidOid, /* domain base type - irrelevant */
NULL, /* default value - none */
NULL, /* default binary representation */
@ -797,7 +797,7 @@ heap_create_with_catalog(const char *relname,
Relation new_rel_desc;
Oid old_type_oid;
Oid new_type_oid;
Oid new_array_oid = InvalidOid;
Oid new_array_oid = InvalidOid;
pg_class_desc = heap_open(RelationRelationId, RowExclusiveLock);
@ -815,9 +815,9 @@ heap_create_with_catalog(const char *relname,
/*
* Since we are going to create a rowtype as well, also check for
* collision with an existing type name. If there is one and it's
* an autogenerated array, we can rename it out of the way; otherwise
* we can at least give a good error message.
* collision with an existing type name. If there is one and it's an
* autogenerated array, we can rename it out of the way; otherwise we can
* at least give a good error message.
*/
old_type_oid = GetSysCacheOid(TYPENAMENSP,
CStringGetDatum(relname),
@ -829,9 +829,9 @@ heap_create_with_catalog(const char *relname,
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("type \"%s\" already exists", relname),
errhint("A relation has an associated type of the same name, "
"so you must use a name that doesn't conflict "
"with any existing type.")));
errhint("A relation has an associated type of the same name, "
"so you must use a name that doesn't conflict "
"with any existing type.")));
}
/*
@ -880,9 +880,9 @@ heap_create_with_catalog(const char *relname,
Assert(relid == RelationGetRelid(new_rel_desc));
/*
* Decide whether to create an array type over the relation's rowtype.
* We do not create any array types for system catalogs (ie, those made
* during initdb). We create array types for regular relations, views,
* Decide whether to create an array type over the relation's rowtype. We
* do not create any array types for system catalogs (ie, those made
* during initdb). We create array types for regular relations, views,
* and composite types ... but not, eg, for toast tables or sequences.
*/
if (IsUnderPostmaster && (relkind == RELKIND_RELATION ||
@ -890,7 +890,7 @@ heap_create_with_catalog(const char *relname,
relkind == RELKIND_COMPOSITE_TYPE))
{
/* OK, so pre-assign a type OID for the array type */
Relation pg_type = heap_open(TypeRelationId, AccessShareLock);
Relation pg_type = heap_open(TypeRelationId, AccessShareLock);
new_array_oid = GetNewOid(pg_type);
heap_close(pg_type, AccessShareLock);
@ -901,14 +901,15 @@ heap_create_with_catalog(const char *relname,
* system type corresponding to the new relation.
*
* NOTE: we could get a unique-index failure here, in case someone else is
* creating the same type name in parallel but hadn't committed yet
* when we checked for a duplicate name above.
* creating the same type name in parallel but hadn't committed yet when
* we checked for a duplicate name above.
*/
new_type_oid = AddNewRelationType(relname,
relnamespace,
relid,
relkind,
new_array_oid);
new_array_oid);
/*
* Now make the array type if wanted.
*/
@ -919,32 +920,32 @@ heap_create_with_catalog(const char *relname,
relarrayname = makeArrayTypeName(relname, relnamespace);
TypeCreate(new_array_oid, /* force the type's OID to this */
relarrayname, /* Array type name */
relnamespace, /* Same namespace as parent */
InvalidOid, /* Not composite, no relationOid */
0, /* relkind, also N/A here */
-1, /* Internal size (varlena) */
TYPTYPE_BASE, /* Not composite - typelem is */
relarrayname, /* Array type name */
relnamespace, /* Same namespace as parent */
InvalidOid, /* Not composite, no relationOid */
0, /* relkind, also N/A here */
-1, /* Internal size (varlena) */
TYPTYPE_BASE, /* Not composite - typelem is */
DEFAULT_TYPDELIM, /* default array delimiter */
F_ARRAY_IN, /* array input proc */
F_ARRAY_OUT, /* array output proc */
F_ARRAY_RECV, /* array recv (bin) proc */
F_ARRAY_SEND, /* array send (bin) proc */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
InvalidOid, /* analyze procedure - default */
new_type_oid, /* array element type - the rowtype */
true, /* yes, this is an array type */
InvalidOid, /* this has no array type */
InvalidOid, /* domain base type - irrelevant */
NULL, /* default value - none */
NULL, /* default binary representation */
false, /* passed by reference */
'd', /* alignment - must be the largest! */
'x', /* fully TOASTable */
-1, /* typmod */
0, /* array dimensions for typBaseType */
false); /* Type NOT NULL */
F_ARRAY_IN, /* array input proc */
F_ARRAY_OUT, /* array output proc */
F_ARRAY_RECV, /* array recv (bin) proc */
F_ARRAY_SEND, /* array send (bin) proc */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
InvalidOid, /* analyze procedure - default */
new_type_oid, /* array element type - the rowtype */
true, /* yes, this is an array type */
InvalidOid, /* this has no array type */
InvalidOid, /* domain base type - irrelevant */
NULL, /* default value - none */
NULL, /* default binary representation */
false, /* passed by reference */
'd', /* alignment - must be the largest! */
'x', /* fully TOASTable */
-1, /* typmod */
0, /* array dimensions for typBaseType */
false); /* Type NOT NULL */
pfree(relarrayname);
}
@ -1723,9 +1724,9 @@ AddRelationRawConstraints(Relation rel,
NameStr(atp->attname));
/*
* If the expression is just a NULL constant, we do not bother
* to make an explicit pg_attrdef entry, since the default behavior
* is equivalent.
* If the expression is just a NULL constant, we do not bother to make
* an explicit pg_attrdef entry, since the default behavior is
* equivalent.
*
* Note a nonobvious property of this test: if the column is of a
* domain type, what we'll get is not a bare null Const but a
@ -1734,7 +1735,7 @@ AddRelationRawConstraints(Relation rel,
* override any default that the domain might have.
*/
if (expr == NULL ||
(IsA(expr, Const) && ((Const *) expr)->constisnull))
(IsA(expr, Const) &&((Const *) expr)->constisnull))
continue;
StoreAttrDefault(rel, colDef->attnum, nodeToString(expr));


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.287 2007/11/08 23:22:54 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.288 2007/11/15 21:14:33 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -724,7 +724,7 @@ index_create(Oid heapRelationId,
}
else
{
bool have_simple_col = false;
bool have_simple_col = false;
/* Create auto dependencies on simply-referenced columns */
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
@ -742,15 +742,15 @@ index_create(Oid heapRelationId,
}
/*
* It's possible for an index to not depend on any columns of
* the table at all, in which case we need to give it a dependency
* on the table as a whole; else it won't get dropped when the
* table is dropped. This edge case is not totally useless;
* for example, a unique index on a constant expression can serve
* to prevent a table from containing more than one row.
* It's possible for an index to not depend on any columns of the
* table at all, in which case we need to give it a dependency on
* the table as a whole; else it won't get dropped when the table
* is dropped. This edge case is not totally useless; for
* example, a unique index on a constant expression can serve to
* prevent a table from containing more than one row.
*/
if (!have_simple_col &&
!contain_vars_of_level((Node *) indexInfo->ii_Expressions, 0) &&
!contain_vars_of_level((Node *) indexInfo->ii_Expressions, 0) &&
!contain_vars_of_level((Node *) indexInfo->ii_Predicate, 0))
{
referenced.classId = RelationRelationId;
@ -1360,15 +1360,15 @@ index_build(Relation heapRelation,
Assert(PointerIsValid(stats));
/*
* If we found any potentially broken HOT chains, mark the index as
* not being usable until the current transaction is below the event
* horizon. See src/backend/access/heap/README.HOT for discussion.
* If we found any potentially broken HOT chains, mark the index as not
* being usable until the current transaction is below the event horizon.
* See src/backend/access/heap/README.HOT for discussion.
*/
if (indexInfo->ii_BrokenHotChain)
{
Oid indexId = RelationGetRelid(indexRelation);
Relation pg_index;
HeapTuple indexTuple;
Oid indexId = RelationGetRelid(indexRelation);
Relation pg_index;
HeapTuple indexTuple;
Form_pg_index indexForm;
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
@ -1515,19 +1515,19 @@ IndexBuildHeapScan(Relation heapRelation,
CHECK_FOR_INTERRUPTS();
/*
* When dealing with a HOT-chain of updated tuples, we want to
* index the values of the live tuple (if any), but index it
* under the TID of the chain's root tuple. This approach is
* necessary to preserve the HOT-chain structure in the heap.
* So we need to be able to find the root item offset for every
* tuple that's in a HOT-chain. When first reaching a new page
* of the relation, call heap_get_root_tuples() to build a map
* of root item offsets on the page.
* When dealing with a HOT-chain of updated tuples, we want to index
* the values of the live tuple (if any), but index it under the TID
* of the chain's root tuple. This approach is necessary to preserve
* the HOT-chain structure in the heap. So we need to be able to find
* the root item offset for every tuple that's in a HOT-chain. When
* first reaching a new page of the relation, call
* heap_get_root_tuples() to build a map of root item offsets on the
* page.
*
* It might look unsafe to use this information across buffer
* lock/unlock. However, we hold ShareLock on the table so no
* ordinary insert/update/delete should occur; and we hold pin on
* the buffer continuously while visiting the page, so no pruning
* ordinary insert/update/delete should occur; and we hold pin on the
* buffer continuously while visiting the page, so no pruning
* operation can occur either.
*
* Note the implied assumption that there is no more than one live
@ -1535,7 +1535,7 @@ IndexBuildHeapScan(Relation heapRelation,
*/
if (scan->rs_cblock != root_blkno)
{
Page page = BufferGetPage(scan->rs_cbuf);
Page page = BufferGetPage(scan->rs_cbuf);
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
heap_get_root_tuples(page, root_offsets);
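A sketch of the per-page root-offset cache this hunk builds, with build_root_map() as a stub for heap_get_root_tuples() and an arbitrary page capacity:

#define MAX_TUPLES_PER_PAGE 256     /* arbitrary stand-in */

static unsigned int   cached_blkno = (unsigned int) -1;
static unsigned short root_offsets[MAX_TUPLES_PER_PAGE];

/* stub: record the root item for every offset on the page */
static void
build_root_map(unsigned int blkno, unsigned short *map)
{
    (void) blkno;
    (void) map;
}

static unsigned short
root_offset_for(unsigned int blkno, unsigned short offnum)
{
    /* rebuild the map only when the scan reaches a new page */
    if (blkno != cached_blkno)
    {
        build_root_map(blkno, root_offsets);
        cached_blkno = blkno;
    }
    return root_offsets[offnum - 1];
}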
@ -1549,12 +1549,13 @@ IndexBuildHeapScan(Relation heapRelation,
/* do our own time qual check */
bool indexIt;
recheck:
recheck:
/*
* We could possibly get away with not locking the buffer here,
* since caller should hold ShareLock on the relation, but let's
* be conservative about it. (This remark is still correct
* even with HOT-pruning: our pin on the buffer prevents pruning.)
* be conservative about it. (This remark is still correct even
* with HOT-pruning: our pin on the buffer prevents pruning.)
*/
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
@ -1580,9 +1581,9 @@ IndexBuildHeapScan(Relation heapRelation,
* building it, and may need to see such tuples.)
*
* However, if it was HOT-updated then we must only index
* the live tuple at the end of the HOT-chain. Since this
* breaks semantics for pre-existing snapshots, mark
* the index as unusable for them.
* the live tuple at the end of the HOT-chain. Since this
* breaks semantics for pre-existing snapshots, mark the
* index as unusable for them.
*
* If we've already decided that the index will be unsafe
* for old snapshots, we may as well stop indexing
@ -1611,13 +1612,13 @@ IndexBuildHeapScan(Relation heapRelation,
* followed by CREATE INDEX within a transaction.) An
* exception occurs when reindexing a system catalog,
* because we often release lock on system catalogs before
* committing. In that case we wait for the inserting
* committing. In that case we wait for the inserting
* transaction to finish and check again. (We could do
* that on user tables too, but since the case is not
* expected it seems better to throw an error.)
*/
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmin(heapTuple->t_data)))
HeapTupleHeaderGetXmin(heapTuple->t_data)))
{
if (!IsSystemRelation(heapRelation))
elog(ERROR, "concurrent insert in progress");
@ -1627,11 +1628,13 @@ IndexBuildHeapScan(Relation heapRelation,
* Must drop the lock on the buffer before we wait
*/
TransactionId xwait = HeapTupleHeaderGetXmin(heapTuple->t_data);
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
XactLockTableWait(xwait);
goto recheck;
}
}
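The unlock, wait, recheck pattern above has a shape worth isolating: drop the buffer lock first, sleep until the in-progress transaction ends, then start the visibility test over. Here is a toy pthread rendering under that assumption; the mutexes and condition variable are merely stand-ins for the buffer content lock and XactLockTableWait().

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t xact_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t xact_cv = PTHREAD_COND_INITIALIZER;
static int	xact_done = 0;

static void *
inserter(void *arg)
{
	(void) arg;
	sleep(1);					/* pretend to do transactional work */
	pthread_mutex_lock(&xact_lock);
	xact_done = 1;				/* "the inserting transaction finishes" */
	pthread_cond_broadcast(&xact_cv);
	pthread_mutex_unlock(&xact_lock);
	return NULL;
}

int
main(void)
{
	pthread_t	t;

	pthread_create(&t, NULL, inserter, NULL);

recheck:
	pthread_mutex_lock(&buffer_lock);	/* LockBuffer(..., SHARE) */
	pthread_mutex_lock(&xact_lock);
	if (!xact_done)
	{
		/* must drop the buffer lock before we wait, as above */
		pthread_mutex_unlock(&buffer_lock);
		while (!xact_done)
			pthread_cond_wait(&xact_cv, &xact_lock);	/* XactLockTableWait */
		pthread_mutex_unlock(&xact_lock);
		goto recheck;
	}
	pthread_mutex_unlock(&xact_lock);
	printf("inserter finished; safe to decide about this tuple\n");
	pthread_mutex_unlock(&buffer_lock);
	pthread_join(t, NULL);
	return 0;
}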
/*
* We must index such tuples, since if the index build
* commits then they're good.
@ -1648,14 +1651,14 @@ IndexBuildHeapScan(Relation heapRelation,
* followed by CREATE INDEX within a transaction.) An
* exception occurs when reindexing a system catalog,
* because we often release lock on system catalogs before
* committing. In that case we wait for the deleting
* committing. In that case we wait for the deleting
* transaction to finish and check again. (We could do
* that on user tables too, but since the case is not
* expected it seems better to throw an error.)
*/
Assert(!(heapTuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI));
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmax(heapTuple->t_data)))
HeapTupleHeaderGetXmax(heapTuple->t_data)))
{
if (!IsSystemRelation(heapRelation))
elog(ERROR, "concurrent delete in progress");
@ -1665,11 +1668,13 @@ IndexBuildHeapScan(Relation heapRelation,
* Must drop the lock on the buffer before we wait
*/
TransactionId xwait = HeapTupleHeaderGetXmax(heapTuple->t_data);
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
XactLockTableWait(xwait);
goto recheck;
}
}
/*
* Otherwise, we have to treat these tuples just like
* RECENTLY_DELETED ones.
@ -1689,7 +1694,7 @@ IndexBuildHeapScan(Relation heapRelation,
break;
default:
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
indexIt = tupleIsAlive = false; /* keep compiler quiet */
indexIt = tupleIsAlive = false; /* keep compiler quiet */
break;
}
@ -1741,11 +1746,11 @@ IndexBuildHeapScan(Relation heapRelation,
if (HeapTupleIsHeapOnly(heapTuple))
{
/*
* For a heap-only tuple, pretend its TID is that of the root.
* See src/backend/access/heap/README.HOT for discussion.
* For a heap-only tuple, pretend its TID is that of the root. See
* src/backend/access/heap/README.HOT for discussion.
*/
HeapTupleData rootTuple;
OffsetNumber offnum;
HeapTupleData rootTuple;
OffsetNumber offnum;
rootTuple = *heapTuple;
offnum = ItemPointerGetOffsetNumber(&heapTuple->t_self);
@ -1787,11 +1792,11 @@ IndexBuildHeapScan(Relation heapRelation,
* We do a concurrent index build by first inserting the catalog entry for the
* index via index_create(), marking it not indisready and not indisvalid.
* Then we commit our transaction and start a new one, then we wait for all
* transactions that could have been modifying the table to terminate. Now
* transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* honor its constraints on HOT updates; so while existing HOT-chains might
* be broken with respect to the index, no currently live tuple will have an
* incompatible HOT update done to it. We now build the index normally via
* incompatible HOT update done to it. We now build the index normally via
* index_build(), while holding a weak lock that allows concurrent
* insert/update/delete. Also, we index only tuples that are valid
* as of the start of the scan (see IndexBuildHeapScan), whereas a normal
@ -1805,7 +1810,7 @@ IndexBuildHeapScan(Relation heapRelation,
*
* Next, we mark the index "indisready" (but still not "indisvalid") and
* commit the second transaction and start a third. Again we wait for all
* transactions that could have been modifying the table to terminate. Now
* transactions that could have been modifying the table to terminate. Now
* we know that any subsequently-started transactions will see the index and
* insert their new tuples into it. We then take a new reference snapshot
* which is passed to validate_index(). Any tuples that are valid according
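Condensed into a storyboard, the whole sequence looks like this. Every function below is a hypothetical no-op stub standing in for real catalog, locking, and snapshot machinery, so the program compiles and runs purely as an outline of the phases described above.

#include <stdio.h>

static void create_catalog_entry(void)  { printf("pg_index row: !ready !valid\n"); }
static void commit_and_start_new_xact(const char *why) { printf("commit & new xact: %s\n", why); }
static void wait_for_table_modifiers(void) { printf("wait out transactions touching the table\n"); }
static void build_index_from_snapshot(void) { printf("index_build()\n"); }
static void mark_indisready(void)       { printf("pg_index row: ready\n"); }
static void validate_against_snapshot(void) { printf("validate_index()\n"); }
static void mark_indisvalid(void)       { printf("pg_index row: valid\n"); }

int
main(void)
{
	create_catalog_entry();
	commit_and_start_new_xact("index now visible; HOT updates constrained");
	wait_for_table_modifiers();
	build_index_from_snapshot();
	mark_indisready();
	commit_and_start_new_xact("index now open for inserts");
	wait_for_table_modifiers();
	validate_against_snapshot();	/* merge in tuples missed during build */
	mark_indisvalid();
	return 0;
}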
@ -1945,8 +1950,8 @@ validate_index_heapscan(Relation heapRelation,
EState *estate;
ExprContext *econtext;
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
bool in_index[MaxHeapTuplesPerPage];
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
bool in_index[MaxHeapTuplesPerPage];
/* state variables for the merge */
ItemPointer indexcursor = NULL;
@ -1989,29 +1994,29 @@ validate_index_heapscan(Relation heapRelation,
{
ItemPointer heapcursor = &heapTuple->t_self;
ItemPointerData rootTuple;
OffsetNumber root_offnum;
OffsetNumber root_offnum;
CHECK_FOR_INTERRUPTS();
state->htups += 1;
/*
* As commented in IndexBuildHeapScan, we should index heap-only tuples
* under the TIDs of their root tuples; so when we advance onto a new
* heap page, build a map of root item offsets on the page.
* As commented in IndexBuildHeapScan, we should index heap-only
* tuples under the TIDs of their root tuples; so when we advance onto
* a new heap page, build a map of root item offsets on the page.
*
* This complicates merging against the tuplesort output: we will
* visit the live tuples in order by their offsets, but the root
* offsets that we need to compare against the index contents might
* be ordered differently. So we might have to "look back" within
* the tuplesort output, but only within the current page. We handle
* that by keeping a bool array in_index[] showing all the
* already-passed-over tuplesort output TIDs of the current page.
* We clear that array here, when advancing onto a new heap page.
* offsets that we need to compare against the index contents might be
* ordered differently. So we might have to "look back" within the
* tuplesort output, but only within the current page. We handle that
* by keeping a bool array in_index[] showing all the
* already-passed-over tuplesort output TIDs of the current page. We
* clear that array here, when advancing onto a new heap page.
*/
if (scan->rs_cblock != root_blkno)
{
Page page = BufferGetPage(scan->rs_cbuf);
Page page = BufferGetPage(scan->rs_cbuf);
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
heap_get_root_tuples(page, root_offsets);
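The "look back only within the current page" trick is easier to see with concrete numbers. In this toy model the sorted index stream and the heap visit order are invented; only the in_index[] bookkeeping follows the description above.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_OFFSETS 8

int
main(void)
{
	/* TIDs (offsets on one page) present in the sorted index stream */
	int			index_stream[] = {1, 4, 5};
	int			nindex = 3;
	/* heap visit order: live tuple offsets and their root offsets */
	int			heap_offsets[] = {2, 3, 6};
	int			root_of[] = {4, 1, 5};	/* roots arrive "out of order" */
	bool		in_index[MAX_OFFSETS + 1];
	int			cursor = 0;

	memset(in_index, false, sizeof(in_index));	/* cleared per heap page */

	for (int i = 0; i < 3; i++)
	{
		int			root = root_of[i];
		bool		found = in_index[root]; /* look back first */

		while (!found && cursor < nindex && index_stream[cursor] <= root)
		{
			in_index[index_stream[cursor]] = true;	/* passed-over TIDs */
			if (index_stream[cursor] == root)
				found = true;
			cursor++;
		}
		printf("heap offset %d (root %d): %s\n", heap_offsets[i], root,
			   found ? "already in index" : "must insert");
	}
	return 0;
}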
@ -2102,14 +2107,14 @@ validate_index_heapscan(Relation heapRelation,
/*
* If the tuple is already committed dead, you might think we
* could suppress uniqueness checking, but this is no longer
* true in the presence of HOT, because the insert is actually
* a proxy for a uniqueness check on the whole HOT-chain. That
* is, the tuple we have here could be dead because it was already
* could suppress uniqueness checking, but this is no longer true
* in the presence of HOT, because the insert is actually a proxy
* for a uniqueness check on the whole HOT-chain. That is, the
* tuple we have here could be dead because it was already
* HOT-updated, and if so the updating transaction will not have
* thought it should insert index entries. The index AM will
* check the whole HOT-chain and correctly detect a conflict
* if there is one.
* thought it should insert index entries. The index AM will
* check the whole HOT-chain and correctly detect a conflict if
* there is one.
*/
index_insert(indexRelation,

View File

@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.99 2007/08/27 03:36:08 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.100 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -75,11 +75,11 @@
*
* The textual specification of search_path can include "$user" to refer to
* the namespace named the same as the current user, if any. (This is just
* ignored if there is no such namespace.) Also, it can include "pg_temp"
* ignored if there is no such namespace.) Also, it can include "pg_temp"
* to refer to the current backend's temp namespace. This is usually also
* ignorable if the temp namespace hasn't been set up, but there's a special
* case: if "pg_temp" appears first then it should be the default creation
* target. We kluge this case a little bit so that the temp namespace isn't
* target. We kluge this case a little bit so that the temp namespace isn't
* set up until the first attempt to create something in it. (The reason for
* klugery is that we can't create the temp namespace outside a transaction,
* but initial GUC processing of search_path happens outside a transaction.)
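Under those rules, resolving a textual search_path might look like the following sketch. The schema catalog, user name, and pending-creation flag are all invented stand-ins for the real lookups.

#include <stdio.h>
#include <string.h>

static int
schema_exists(const char *name)
{
	const char *known[] = {"pg_catalog", "public", "alice"};

	for (int i = 0; i < 3; i++)
		if (strcmp(known[i], name) == 0)
			return 1;
	return 0;
}

int
main(void)
{
	const char *current_user = "bob";	/* no "bob" schema exists */
	int			temp_namespace_set = 0; /* pg_temp not created yet */
	const char *path[] = {"pg_temp", "$user", "public"};

	for (int i = 0; i < 3; i++)
	{
		const char *item = path[i];

		if (strcmp(item, "$user") == 0)
		{
			if (schema_exists(current_user))
				printf("use schema \"%s\"\n", current_user);
			/* else: silently ignored, as the comment says */
		}
		else if (strcmp(item, "pg_temp") == 0)
		{
			if (temp_namespace_set)
				printf("use temp namespace\n");
			else if (i == 0)
				printf("pg_temp is first: mark temp creation pending\n");
		}
		else if (schema_exists(item))
			printf("use schema \"%s\"\n", item);
	}
	return 0;
}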
@ -144,10 +144,10 @@ static bool baseSearchPathValid = true;
typedef struct
{
List *searchPath; /* the desired search path */
List *searchPath; /* the desired search path */
Oid creationNamespace; /* the desired creation namespace */
int nestLevel; /* subtransaction nesting level */
} OverrideStackEntry;
int nestLevel; /* subtransaction nesting level */
} OverrideStackEntry;
static List *overrideStack = NIL;
@ -157,7 +157,7 @@ static List *overrideStack = NIL;
* command is first executed). Thereafter it's the OID of the temp namespace.
*
* myTempToastNamespace is the OID of the namespace for my temp tables' toast
* tables. It is set when myTempNamespace is, and is InvalidOid before that.
* tables. It is set when myTempNamespace is, and is InvalidOid before that.
*
* myTempNamespaceSubID shows whether we've created the TEMP namespace in the
* current subtransaction. The flag propagates up the subtransaction tree,
@ -241,10 +241,10 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK)
if (relation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("temporary tables cannot specify a schema name")));
errmsg("temporary tables cannot specify a schema name")));
if (OidIsValid(myTempNamespace))
relId = get_relname_relid(relation->relname, myTempNamespace);
else /* this probably can't happen? */
else /* this probably can't happen? */
relId = InvalidOid;
}
else if (relation->schemaname)
@ -308,7 +308,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
errmsg("temporary tables cannot specify a schema name")));
errmsg("temporary tables cannot specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
@ -619,8 +619,8 @@ FuncnameGetCandidates(List *names, int nargs)
else
{
/*
* Consider only procs that are in the search path and are not
* in the temp namespace.
* Consider only procs that are in the search path and are not in
* the temp namespace.
*/
ListCell *nsp;
@ -949,8 +949,8 @@ OpernameGetCandidates(List *names, char oprkind)
else
{
/*
* Consider only opers that are in the search path and are not
* in the temp namespace.
* Consider only opers that are in the search path and are not in
* the temp namespace.
*/
ListCell *nsp;
@ -1377,7 +1377,7 @@ TSParserGetPrsid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
prsoid = GetSysCacheOid(TSPARSERNAMENSP,
PointerGetDatum(parser_name),
@ -1433,8 +1433,8 @@ TSParserIsVisible(Oid prsId)
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another parser of the same name earlier in the path. So we
* must do a slow check for conflicting parsers.
* hidden by another parser of the same name earlier in the path. So
* we must do a slow check for conflicting parsers.
*/
char *name = NameStr(form->prsname);
ListCell *l;
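The slow conflict check reduces to: walk the search path in order, and the object is visible only if its own namespace is reached before any other namespace holding a same-named object. A standalone sketch with invented namespace ids and a two-entry toy catalog:

#include <stdio.h>
#include <string.h>

struct obj
{
	const char *name;
	int			nsp;
};

int
main(void)
{
	int			path[] = {10, 20, 30};	/* search path, front to back */
	struct obj	catalog[] = {{"english", 20}, {"english", 30}};
	struct obj	target = {"english", 30};	/* is this one visible? */

	for (int i = 0; i < 3; i++)
	{
		int			nsp = path[i];

		if (nsp == target.nsp)
		{
			printf("visible: its namespace comes first\n");
			return 0;
		}
		for (int j = 0; j < 2; j++)
			if (catalog[j].nsp == nsp &&
				strcmp(catalog[j].name, target.name) == 0)
			{
				printf("hidden by same-named object in namespace %d\n", nsp);
				return 0;
			}
	}
	printf("not in the search path at all\n");
	return 0;
}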
@ -1445,7 +1445,7 @@ TSParserIsVisible(Oid prsId)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@ -1505,7 +1505,7 @@ TSDictionaryGetDictid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
dictoid = GetSysCacheOid(TSDICTNAMENSP,
PointerGetDatum(dict_name),
@ -1562,8 +1562,8 @@ TSDictionaryIsVisible(Oid dictId)
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another dictionary of the same name earlier in the
* path. So we must do a slow check for conflicting dictionaries.
* hidden by another dictionary of the same name earlier in the path.
* So we must do a slow check for conflicting dictionaries.
*/
char *name = NameStr(form->dictname);
ListCell *l;
@ -1574,7 +1574,7 @@ TSDictionaryIsVisible(Oid dictId)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@ -1634,7 +1634,7 @@ TSTemplateGetTmplid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
tmploid = GetSysCacheOid(TSTEMPLATENAMENSP,
PointerGetDatum(template_name),
@ -1690,8 +1690,8 @@ TSTemplateIsVisible(Oid tmplId)
{
/*
* If it is in the path, it might still not be visible; it could be
* hidden by another template of the same name earlier in the path.
* So we must do a slow check for conflicting templates.
* hidden by another template of the same name earlier in the path. So
* we must do a slow check for conflicting templates.
*/
char *name = NameStr(form->tmplname);
ListCell *l;
@ -1702,7 +1702,7 @@ TSTemplateIsVisible(Oid tmplId)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@ -1762,7 +1762,7 @@ TSConfigGetCfgid(List *names, bool failOK)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
cfgoid = GetSysCacheOid(TSCONFIGNAMENSP,
PointerGetDatum(config_name),
@ -1785,7 +1785,7 @@ TSConfigGetCfgid(List *names, bool failOK)
/*
* TSConfigIsVisible
* Determine whether a text search configuration (identified by OID)
* is visible in the current search path. Visible means "would be found
* is visible in the current search path. Visible means "would be found
* by searching for the unqualified text search configuration name".
*/
bool
@ -1831,7 +1831,7 @@ TSConfigIsVisible(Oid cfgid)
Oid namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
if (namespaceId == namespace)
{
@ -1925,11 +1925,12 @@ LookupExplicitNamespace(const char *nspname)
{
if (OidIsValid(myTempNamespace))
return myTempNamespace;
/*
* Since this is used only for looking up existing objects, there
* is no point in trying to initialize the temp namespace here;
* and doing so might create problems for some callers.
* Just fall through and give the "does not exist" error.
* Since this is used only for looking up existing objects, there is
* no point in trying to initialize the temp namespace here; and doing
* so might create problems for some callers. Just fall through and
* give the "does not exist" error.
*/
}
@ -2166,7 +2167,7 @@ bool
isTempOrToastNamespace(Oid namespaceId)
{
if (OidIsValid(myTempNamespace) &&
(myTempNamespace == namespaceId || myTempToastNamespace == namespaceId))
(myTempNamespace == namespaceId || myTempToastNamespace == namespaceId))
return true;
return false;
}
@ -2208,7 +2209,7 @@ isOtherTempNamespace(Oid namespaceId)
/*
* GetTempToastNamespace - get the OID of my temporary-toast-table namespace,
* which must already be assigned. (This is only used when creating a toast
* which must already be assigned. (This is only used when creating a toast
* table for a temp table, so we must have already done InitTempTableNamespace)
*/
Oid
@ -2265,7 +2266,7 @@ GetOverrideSearchPath(MemoryContext context)
* search_path variable is ignored while an override is active.
*/
void
PushOverrideSearchPath(OverrideSearchPath *newpath)
PushOverrideSearchPath(OverrideSearchPath * newpath)
{
OverrideStackEntry *entry;
List *oidlist;
@ -2315,7 +2316,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath)
/* And make it active. */
activeSearchPath = entry->searchPath;
activeCreationNamespace = entry->creationNamespace;
activeTempCreationPending = false; /* XXX is this OK? */
activeTempCreationPending = false; /* XXX is this OK? */
MemoryContextSwitchTo(oldcxt);
}
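The override stack itself is simple enough to model in a few lines. This sketch uses a singly linked list in place of a List, with invented names; the nest-level field is what lets subtransaction abort pop back to a sane state.

#include <stdio.h>
#include <stdlib.h>

struct entry
{
	const char *search_path;
	int			nest_level;
	struct entry *next;
};

static struct entry *override_stack = NULL;
static const char *active_path = "user's search_path";

static void
push_override(const char *path, int nest_level)
{
	struct entry *e = malloc(sizeof(*e));

	e->search_path = path;
	e->nest_level = nest_level;
	e->next = override_stack;
	override_stack = e;
	active_path = path;			/* the GUC is ignored while this is active */
}

static void
pop_override(void)
{
	struct entry *e = override_stack;

	override_stack = e->next;
	free(e);
	active_path = override_stack ? override_stack->search_path
		: "user's search_path";
}

int
main(void)
{
	push_override("pg_catalog", 1); /* e.g. while running restricted SQL */
	printf("active: %s\n", active_path);
	pop_override();
	printf("active: %s\n", active_path);
	return 0;
}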
@ -2349,7 +2350,7 @@ PopOverrideSearchPath(void)
entry = (OverrideStackEntry *) linitial(overrideStack);
activeSearchPath = entry->searchPath;
activeCreationNamespace = entry->creationNamespace;
activeTempCreationPending = false; /* XXX is this OK? */
activeTempCreationPending = false; /* XXX is this OK? */
}
else
{
@ -2392,7 +2393,7 @@ FindConversionByName(List *name)
namespaceId = lfirst_oid(l);
if (namespaceId == myTempNamespace)
continue; /* do not look in temp namespace */
continue; /* do not look in temp namespace */
conoid = FindConversion(conversion_name, namespaceId);
if (OidIsValid(conoid))
@ -2533,7 +2534,7 @@ recomputeNamespacePath(void)
}
/*
* Remember the first member of the explicit list. (Note: this is
* Remember the first member of the explicit list. (Note: this is
* nominally wrong if temp_missing, but we need it anyway to distinguish
* explicit from implicit mention of pg_catalog.)
*/
@ -2696,7 +2697,7 @@ AtEOXact_Namespace(bool isCommit)
{
myTempNamespace = InvalidOid;
myTempToastNamespace = InvalidOid;
baseSearchPathValid = false; /* need to rebuild list */
baseSearchPathValid = false; /* need to rebuild list */
}
myTempNamespaceSubID = InvalidSubTransactionId;
}
@ -2748,7 +2749,7 @@ AtEOSubXact_Namespace(bool isCommit, SubTransactionId mySubid,
/* TEMP namespace creation failed, so reset state */
myTempNamespace = InvalidOid;
myTempToastNamespace = InvalidOid;
baseSearchPathValid = false; /* need to rebuild list */
baseSearchPathValid = false; /* need to rebuild list */
}
}
@ -2773,7 +2774,7 @@ AtEOSubXact_Namespace(bool isCommit, SubTransactionId mySubid,
entry = (OverrideStackEntry *) linitial(overrideStack);
activeSearchPath = entry->searchPath;
activeCreationNamespace = entry->creationNamespace;
activeTempCreationPending = false; /* XXX is this OK? */
activeTempCreationPending = false; /* XXX is this OK? */
}
else
{
@ -2983,9 +2984,9 @@ fetch_search_path(bool includeImplicit)
recomputeNamespacePath();
/*
* If the temp namespace should be first, force it to exist. This is
* so that callers can trust the result to reflect the actual default
* creation namespace. It's a bit bogus to do this here, since
* If the temp namespace should be first, force it to exist. This is so
* that callers can trust the result to reflect the actual default
* creation namespace. It's a bit bogus to do this here, since
* current_schema() is supposedly a stable function without side-effects,
* but the alternatives seem worse.
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.87 2007/09/03 00:39:14 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.88 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -172,8 +172,8 @@ AggregateCreate(const char *aggName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result data type"),
errdetail("An aggregate returning a polymorphic type "
"must have at least one polymorphic argument.")));
errdetail("An aggregate returning a polymorphic type "
"must have at least one polymorphic argument.")));
/* handle sortop, if supplied */
if (aggsortopName)
@ -213,8 +213,8 @@ AggregateCreate(const char *aggName,
PointerGetDatum(NULL), /* parameterModes */
PointerGetDatum(NULL), /* parameterNames */
PointerGetDatum(NULL), /* proconfig */
1, /* procost */
0); /* prorows */
1, /* procost */
0); /* prorows */
/*
* Okay to create the pg_aggregate entry.

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.35 2007/02/14 01:58:56 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.36 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -286,10 +286,10 @@ CreateConstraintEntry(const char *constraintName,
if (foreignNKeys > 0)
{
/*
* Register normal dependencies on the equality operators that
* support a foreign-key constraint. If the PK and FK types
* are the same then all three operators for a column are the
* same; otherwise they are different.
* Register normal dependencies on the equality operators that support
* a foreign-key constraint. If the PK and FK types are the same then
* all three operators for a column are the same; otherwise they are
* different.
*/
ObjectAddress oprobject;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.38 2007/09/24 01:29:28 adunstan Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_conversion.c,v 1.39 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -275,4 +275,3 @@ FindConversion(const char *conname, Oid connamespace)
return conoid;
}

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.2 2007/04/02 22:14:17 adunstan Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_enum.c,v 1.3 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -37,32 +37,33 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
TupleDesc tupDesc;
NameData enumlabel;
Oid *oids;
int i, n;
int i,
n;
Datum values[Natts_pg_enum];
char nulls[Natts_pg_enum];
ListCell *lc;
HeapTuple tup;
HeapTuple tup;
n = list_length(vals);
/*
* XXX we do not bother to check the list of values for duplicates ---
* if you have any, you'll get a less-than-friendly unique-index
* violation. Is it worth trying harder?
* XXX we do not bother to check the list of values for duplicates --- if
* you have any, you'll get a less-than-friendly unique-index violation.
* Is it worth trying harder?
*/
pg_enum = heap_open(EnumRelationId, RowExclusiveLock);
tupDesc = pg_enum->rd_att;
/*
* Allocate oids. While this method does not absolutely guarantee
* that we generate no duplicate oids (since we haven't entered each
* oid into the table before allocating the next), trouble could only
* occur if the oid counter wraps all the way around before we finish.
* Which seems unlikely.
* Allocate oids. While this method does not absolutely guarantee that we
* generate no duplicate oids (since we haven't entered each oid into the
* table before allocating the next), trouble could only occur if the oid
* counter wraps all the way around before we finish. Which seems
* unlikely.
*/
oids = (Oid *) palloc(n * sizeof(Oid));
for(i = 0; i < n; i++)
for (i = 0; i < n; i++)
{
oids[i] = GetNewOid(pg_enum);
}
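The allocation step can be shown standalone; the oid_cmp() in a later hunk of this file exists to serve exactly this kind of qsort, so that label order matches id order. The counter below is an invented stand-in for GetNewOid(), which also skips values already present in the table; the wraparound caveat from the comment is why uniqueness is only probabilistic.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Oid;

static Oid	counter = 4090;		/* pretend cluster-wide OID counter */

static Oid
fake_get_new_oid(void)
{
	return counter++;			/* real code also avoids collisions */
}

static int
oid_cmp(const void *p1, const void *p2)
{
	Oid			v1 = *(const Oid *) p1;
	Oid			v2 = *(const Oid *) p2;

	return (v1 < v2) ? -1 : (v1 > v2) ? 1 : 0;
}

int
main(void)
{
	int			n = 3;
	Oid		   *oids = malloc(n * sizeof(Oid));

	for (int i = 0; i < n; i++)
		oids[i] = fake_get_new_oid();
	qsort(oids, n, sizeof(Oid), oid_cmp);

	for (int i = 0; i < n; i++)
		printf("label %d gets oid %u\n", i, oids[i]);
	free(oids);
	return 0;
}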
@ -76,9 +77,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
i = 0;
foreach(lc, vals)
{
char *lab = strVal(lfirst(lc));
char *lab = strVal(lfirst(lc));
/*
/*
* labels are stored in a name field, for easier syscache lookup, so
* check the length to make sure it's within range.
*/
@ -86,9 +87,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
if (strlen(lab) > (NAMEDATALEN - 1))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("invalid enum label \"%s\", must be %d characters or less",
lab,
NAMEDATALEN - 1)));
errmsg("invalid enum label \"%s\", must be %d characters or less",
lab,
NAMEDATALEN - 1)));
values[Anum_pg_enum_enumtypid - 1] = ObjectIdGetDatum(enumTypeOid);
@ -148,8 +149,8 @@ EnumValuesDelete(Oid enumTypeOid)
static int
oid_cmp(const void *p1, const void *p2)
{
Oid v1 = *((const Oid *) p1);
Oid v2 = *((const Oid *) p2);
Oid v1 = *((const Oid *) p1);
Oid v2 = *((const Oid *) p2);
if (v1 < v2)
return -1;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.101 2007/11/07 12:24:24 petere Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_operator.c,v 1.102 2007/11/15 21:14:33 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@ -868,7 +868,7 @@ makeOperatorDependencies(HeapTuple tuple)
* operators oprcom and oprnegate. We would not want to delete this
* operator if those go away, but only reset the link fields; which is not
* a function that the dependency code can presently handle. (Something
* could perhaps be done with objectSubId though.) For now, it's okay to
* could perhaps be done with objectSubId though.) For now, it's okay to
* let those links dangle if a referenced operator is removed.
*/

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.146 2007/09/03 00:39:14 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.147 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -139,7 +139,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
* is polymorphic. Also, do not allow return type INTERNAL unless at
* is polymorphic. Also, do not allow return type INTERNAL unless at
* least one input argument is INTERNAL.
*/
for (i = 0; i < parameterCount; i++)

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.20 2007/05/14 20:07:01 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.21 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -487,7 +487,7 @@ checkSharedDependencies(Oid classId, Oid objectId)
/*
* We limit the number of dependencies reported to the client to
* MAX_REPORTED_DEPS, since client software may not deal well with
* enormous error strings. The server log always gets a full report,
* enormous error strings. The server log always gets a full report,
* which is collected in a separate StringInfo if and only if we detect
* that the client report is going to be truncated.
*/
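The truncation scheme can be sketched as two buffers: the first MAX_REPORTED_DEPS entries go into the client message, everything goes into the log copy, and the log copy is used only when truncation actually happened. Toy data throughout.

#include <stdio.h>
#include <string.h>

#define MAX_REPORTED_DEPS 3

int
main(void)
{
	const char *deps[] = {"db1: table a", "db1: table b", "db2: view c",
	"db2: index d", "db3: function e"};
	int			ndeps = 5;
	char		client_msg[256] = "";
	char		log_msg[256] = "";

	for (int i = 0; i < ndeps; i++)
	{
		if (i < MAX_REPORTED_DEPS)
		{
			strcat(client_msg, deps[i]);
			strcat(client_msg, "\n");
		}
		strcat(log_msg, deps[i]);
		strcat(log_msg, "\n");
	}
	printf("to client:\n%s", client_msg);
	if (ndeps > MAX_REPORTED_DEPS)
		printf("(%d more; see server log)\nto log:\n%s",
			   ndeps - MAX_REPORTED_DEPS, log_msg);
	return 0;
}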
@ -662,7 +662,7 @@ checkSharedDependencies(Oid classId, Oid objectId)
if (numNotReportedDeps > 0 || numNotReportedDbs > 0)
{
ObjectAddress obj;
ObjectAddress obj;
obj.classId = classId;
obj.objectId = objectId;

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.113 2007/05/12 00:54:59 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.114 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -88,7 +88,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace)
values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
values[i++] = Int16GetDatum(sizeof(int4)); /* typlen */
values[i++] = BoolGetDatum(true); /* typbyval */
values[i++] = CharGetDatum(TYPTYPE_PSEUDO); /* typtype */
values[i++] = CharGetDatum(TYPTYPE_PSEUDO); /* typtype */
values[i++] = BoolGetDatum(false); /* typisdefined */
values[i++] = CharGetDatum(DEFAULT_TYPDELIM); /* typdelim */
values[i++] = ObjectIdGetDatum(InvalidOid); /* typrelid */
@ -255,13 +255,13 @@ TypeCreate(Oid newTypeOid,
values[i++] = CharGetDatum(typDelim); /* typdelim */
values[i++] = ObjectIdGetDatum(relationOid); /* typrelid */
values[i++] = ObjectIdGetDatum(elementType); /* typelem */
values[i++] = ObjectIdGetDatum(arrayType); /* typarray */
values[i++] = ObjectIdGetDatum(arrayType); /* typarray */
values[i++] = ObjectIdGetDatum(inputProcedure); /* typinput */
values[i++] = ObjectIdGetDatum(outputProcedure); /* typoutput */
values[i++] = ObjectIdGetDatum(receiveProcedure); /* typreceive */
values[i++] = ObjectIdGetDatum(sendProcedure); /* typsend */
values[i++] = ObjectIdGetDatum(typmodinProcedure); /* typmodin */
values[i++] = ObjectIdGetDatum(typmodoutProcedure); /* typmodout */
values[i++] = ObjectIdGetDatum(typmodoutProcedure); /* typmodout */
values[i++] = ObjectIdGetDatum(analyzeProcedure); /* typanalyze */
values[i++] = CharGetDatum(alignment); /* typalign */
values[i++] = CharGetDatum(storage); /* typstorage */
@ -397,8 +397,8 @@ TypeCreate(Oid newTypeOid,
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
Oid relationOid, /* only for relation rowtypes */
char relationKind, /* ditto */
Oid relationOid, /* only for relation rowtypes */
char relationKind, /* ditto */
Oid owner,
Oid inputProcedure,
Oid outputProcedure,
@ -534,7 +534,7 @@ GenerateTypeDependencies(Oid typeNamespace,
referenced.objectId = elementType;
referenced.objectSubId = 0;
recordDependencyOn(&myself, &referenced,
isImplicitArray ? DEPENDENCY_INTERNAL : DEPENDENCY_NORMAL);
isImplicitArray ? DEPENDENCY_INTERNAL : DEPENDENCY_NORMAL);
}
/* Normal dependency from a domain to its base type. */
@ -604,7 +604,7 @@ TypeRename(Oid typeOid, const char *newTypeName, Oid typeNamespace)
/* If the type has an array type, recurse to handle that */
if (OidIsValid(arrayOid))
{
char *arrname = makeArrayTypeName(newTypeName, typeNamespace);
char *arrname = makeArrayTypeName(newTypeName, typeNamespace);
TypeRename(arrayOid, arrname, typeNamespace);
pfree(arrname);
@ -622,12 +622,12 @@ char *
makeArrayTypeName(const char *typeName, Oid typeNamespace)
{
char *arr;
int i;
int i;
Relation pg_type_desc;
/*
* The idea is to prepend underscores as needed until we make a name
* that doesn't collide with anything...
* The idea is to prepend underscores as needed until we make a name that
* doesn't collide with anything...
*/
arr = palloc(NAMEDATALEN);
@ -647,10 +647,10 @@ makeArrayTypeName(const char *typeName, Oid typeNamespace)
heap_close(pg_type_desc, AccessShareLock);
if (i >= NAMEDATALEN-1)
if (i >= NAMEDATALEN - 1)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("could not form array type name for type \"%s\"",
errmsg("could not form array type name for type \"%s\"",
typeName)));
return arr;
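A self-contained rendition of that loop: prepend underscores, truncating to NAMEDATALEN - 1 bytes, until the name is free. The in-use check is a stub where the backend would consult pg_type.

#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64

static int
name_in_use(const char *name)
{
	return strcmp(name, "_widget") == 0;	/* pretend "_widget" is taken */
}

int
main(void)
{
	const char *type_name = "widget";
	char		arr[NAMEDATALEN];
	int			i;

	for (i = 1; i < NAMEDATALEN - 1; i++)
	{
		memset(arr, '_', i);
		strncpy(arr + i, type_name, NAMEDATALEN - 1 - i);
		arr[NAMEDATALEN - 1] = '\0';
		if (!name_in_use(arr))
			break;
	}
	if (i >= NAMEDATALEN - 1)
		fprintf(stderr, "could not form array type name\n");
	else
		printf("array type name: %s\n", arr);	/* "__widget" here */
	return 0;
}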
@ -698,10 +698,10 @@ moveArrayTypeName(Oid typeOid, const char *typeName, Oid typeNamespace)
return false;
/*
* OK, use makeArrayTypeName to pick an unused modification of the
* name. Note that since makeArrayTypeName is an iterative process,
* this will produce a name that it might have produced the first time,
* had the conflicting type we are about to create already existed.
* OK, use makeArrayTypeName to pick an unused modification of the name.
* Note that since makeArrayTypeName is an iterative process, this will
* produce a name that it might have produced the first time, had the
* conflicting type we are about to create already existed.
*/
newname = makeArrayTypeName(typeName, typeNamespace);

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.110 2007/10/24 20:55:36 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.111 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -118,7 +118,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
totaldeadrows;
HeapTuple *rows;
PGRUsage ru0;
TimestampTz starttime = 0;
TimestampTz starttime = 0;
if (vacstmt->verbose)
elevel = INFO;
@ -1346,7 +1346,7 @@ typedef struct
FmgrInfo *cmpFn;
int cmpFlags;
int *tupnoLink;
} CompareScalarsContext;
} CompareScalarsContext;
static void compute_minimal_stats(VacAttrStatsP stats,

View File

@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.164 2007/09/29 18:05:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.165 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -80,7 +80,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
*
* The single-relation case does not have any such overhead.
*
* We also allow a relation to be specified without index. In that case,
* We also allow a relation to be specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@ -107,13 +107,13 @@ cluster(ClusterStmt *stmt, bool isTopLevel)
RelationGetRelationName(rel));
/*
* Reject clustering a remote temp table ... their local buffer manager
* is not going to cope.
* Reject clustering a remote temp table ... their local buffer
* manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster temporary tables of other sessions")));
errmsg("cannot cluster temporary tables of other sessions")));
if (stmt->indexname == NULL)
{
@ -289,7 +289,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* check in the "recheck" case is appropriate (which currently means
* somebody is executing a database-wide CLUSTER), because there is
* another check in cluster() which will stop any attempt to cluster
* remote temp tables by name. There is another check in
* remote temp tables by name. There is another check in
* check_index_is_clusterable which is redundant, but we leave it for
* extra safety.
*/
@ -733,8 +733,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
/*
* compute xids used to freeze and weed out dead tuples. We use -1
* freeze_min_age to avoid having CLUSTER freeze tuples earlier than
* a plain VACUUM would.
* freeze_min_age to avoid having CLUSTER freeze tuples earlier than a
* plain VACUUM would.
*/
vacuum_set_xid_limits(-1, OldHeap->rd_rel->relisshared,
&OldestXmin, &FreezeXid);
@ -745,8 +745,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
/*
* Scan through the OldHeap in OldIndex order and copy each tuple into the
* NewHeap. To ensure we see recently-dead tuples that still need to be
* copied, we scan with SnapshotAny and use HeapTupleSatisfiesVacuum
* for the visibility test.
* copied, we scan with SnapshotAny and use HeapTupleSatisfiesVacuum for
* the visibility test.
*/
scan = index_beginscan(OldHeap, OldIndex,
SnapshotAny, 0, (ScanKey) NULL);
@ -774,31 +774,33 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
isdead = false;
break;
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
* We should not see this unless it's been inserted earlier
* in our own transaction.
* We should not see this unless it's been inserted earlier in
* our own transaction.
*/
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmin(tuple->t_data)))
HeapTupleHeaderGetXmin(tuple->t_data)))
elog(ERROR, "concurrent insert in progress");
/* treat as live */
isdead = false;
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
* We should not see this unless it's been deleted earlier
* in our own transaction.
* We should not see this unless it's been deleted earlier in
* our own transaction.
*/
Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI));
if (!TransactionIdIsCurrentTransactionId(
HeapTupleHeaderGetXmax(tuple->t_data)))
HeapTupleHeaderGetXmax(tuple->t_data)))
elog(ERROR, "concurrent delete in progress");
/* treat as recently dead */
isdead = false;
break;
default:
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
isdead = false; /* keep compiler quiet */
isdead = false; /* keep compiler quiet */
break;
}

View File

@ -7,7 +7,7 @@
* Copyright (c) 1996-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.98 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.99 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1493,7 +1493,7 @@ CommentTSParser(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to comment on text search parser")));
errmsg("must be superuser to comment on text search parser")));
CreateComments(prsId, TSParserRelationId, 0, comment);
}
@ -1522,7 +1522,7 @@ CommentTSTemplate(List *qualname, char *comment)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to comment on text search template")));
errmsg("must be superuser to comment on text search template")));
CreateComments(tmplId, TSTemplateRelationId, 0, comment);
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.287 2007/09/12 20:49:27 adunstan Exp $
* $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.288 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -997,7 +997,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
errmsg("COPY (SELECT) WITH OIDS is not supported")));
/*
* Run parse analysis and rewrite. Note this also acquires sufficient
* Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
@ -1638,8 +1638,8 @@ CopyFrom(CopyState cstate)
MemoryContext oldcontext = CurrentMemoryContext;
ErrorContextCallback errcontext;
CommandId mycid = GetCurrentCommandId();
bool use_wal = true; /* by default, use WAL logging */
bool use_fsm = true; /* by default, use FSM for free space */
bool use_wal = true; /* by default, use WAL logging */
bool use_fsm = true; /* by default, use FSM for free space */
Assert(cstate->rel);
@ -2148,7 +2148,7 @@ CopyFrom(CopyState cstate)
cstate->filename)));
}
/*
/*
* If we skipped writing WAL, then we need to sync the heap (but not
* indexes since those use WAL anyway)
*/
@ -2685,7 +2685,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
char *start_ptr;
char *end_ptr;
int input_len;
bool saw_high_bit = false;
bool saw_high_bit = false;
/* Make sure space remains in fieldvals[] */
if (fieldno >= maxfields)
@ -2776,7 +2776,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
}
c = val & 0xff;
if (IS_HIGHBIT_SET(c))
saw_high_bit = true;
saw_high_bit = true;
}
}
break;
@ -2804,7 +2804,7 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
* literally
*/
}
}
}
/* Add c to output string */
*output_ptr++ = c;
@ -2813,13 +2813,15 @@ CopyReadAttributesText(CopyState cstate, int maxfields, char **fieldvals)
/* Terminate attribute value in output area */
*output_ptr++ = '\0';
/* If we de-escaped a char with the high bit set, make sure
* we still have valid data for the db encoding. Avoid calling strlen
* here for the sake of efficiency.
/*
* If we de-escaped a char with the high bit set, make sure we still
* have valid data for the db encoding. Avoid calling strlen here for
* the sake of efficiency.
*/
if (saw_high_bit)
{
char *fld = fieldvals[fieldno];
char *fld = fieldvals[fieldno];
pg_verifymbstr(fld, output_ptr - (fld + 1), false);
}
@ -3077,15 +3079,15 @@ CopyAttributeOutText(CopyState cstate, char *string)
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
* are infrequent. To avoid overhead from calling CopySendData once per
* character, we dump out all characters between escaped characters in
* a single call. The loop invariant is that the data from "start" to
* "ptr" can be sent literally, but hasn't yet been.
* character, we dump out all characters between escaped characters in a
* single call. The loop invariant is that the data from "start" to "ptr"
* can be sent literally, but hasn't yet been.
*
* We can skip pg_encoding_mblen() overhead when encoding is safe, because
* in valid backend encodings, extra bytes of a multibyte character never
* look like ASCII. This loop is sufficiently performance-critical that
* it's worth making two copies of it to get the IS_HIGHBIT_SET() test
* out of the normal safe-encoding path.
* it's worth making two copies of it to get the IS_HIGHBIT_SET() test out
* of the normal safe-encoding path.
*/
if (cstate->encoding_embeds_ascii)
{
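The DUMPSOFAR batching is the heart of both loop copies. A stripped-down standalone version, writing to stdout instead of the COPY data stream, with an invented input and delimiter; only the run-flushing idea is taken from the code above.

#include <stdio.h>

static void
send_data(const char *p, size_t n)
{
	fwrite(p, 1, n, stdout);
}

static void
send_char(char c)
{
	fputc(c, stdout);
}

int
main(void)
{
	const char *string = "one\ttwo\nthree";
	char		delim = '\t';
	const char *start = string;
	const char *ptr = string;

	for (; *ptr; ptr++)
	{
		char		c = *ptr;

		if (c == delim || c == '\\' || (unsigned char) c < 0x20)
		{
			send_data(start, ptr - start);	/* DUMPSOFAR */
			send_char('\\');
			switch (c)
			{
				case '\n':
					send_char('n');
					start = ptr + 1;
					break;
				case '\t':
					send_char('t');
					start = ptr + 1;
					break;
				default:
					start = ptr;	/* include char in next run */
					break;
			}
		}
	}
	send_data(start, ptr - start);
	send_char('\n');
	return 0;
}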
@ -3096,13 +3098,16 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
start = ptr++; /* we include char in next run */
start = ptr++; /* we include char in next run */
}
else if ((unsigned char) c < (unsigned char) 0x20)
{
switch (c)
{
/* \r and \n must be escaped, the others are traditional */
/*
* \r and \n must be escaped, the others are
* traditional
*/
case '\b':
case '\f':
case '\n':
@ -3134,13 +3139,16 @@ CopyAttributeOutText(CopyState cstate, char *string)
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
start = ptr++; /* we include char in next run */
start = ptr++; /* we include char in next run */
}
else if ((unsigned char) c < (unsigned char) 0x20)
{
switch (c)
{
/* \r and \n must be escaped, the others are traditional */
/*
* \r and \n must be escaped, the others are
* traditional
*/
case '\b':
case '\f':
case '\n':

View File

@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.202 2007/10/16 11:30:16 mha Exp $
* $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.203 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -260,17 +260,17 @@ createdb(const CreatedbStmt *stmt)
* Check whether encoding matches server locale settings. We allow
* mismatch in three cases:
*
* 1. ctype_encoding = SQL_ASCII, which means either that the locale
* is C/POSIX which works with any encoding, or that we couldn't determine
* 1. ctype_encoding = SQL_ASCII, which means either that the locale is
* C/POSIX which works with any encoding, or that we couldn't determine
* the locale's encoding and have to trust the user to get it right.
*
* 2. selected encoding is SQL_ASCII, but only if you're a superuser.
* This is risky but we have historically allowed it --- notably, the
* 2. selected encoding is SQL_ASCII, but only if you're a superuser. This
* is risky but we have historically allowed it --- notably, the
* regression tests require it.
*
* 3. selected encoding is UTF8 and platform is win32. This is because
* UTF8 is a pseudo codepage that is supported in all locales since
* it's converted to UTF16 before being used.
* UTF8 is a pseudo codepage that is supported in all locales since it's
* converted to UTF16 before being used.
*
* Note: if you change this policy, fix initdb to match.
*/
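Those three escape hatches condense into one predicate. The encoding ids and the superuser/win32 inputs below are stand-ins; only the decision structure comes from the comment.

#include <stdbool.h>
#include <stdio.h>

enum
{
	SQL_ASCII, LATIN1, UTF8
};

static bool
encoding_allowed(int encoding, int ctype_encoding,
				 bool is_superuser, bool is_win32)
{
	if (encoding == ctype_encoding)
		return true;			/* the normal case */
	if (ctype_encoding == SQL_ASCII)
		return true;			/* case 1: locale is C/POSIX, or unknown */
	if (encoding == SQL_ASCII && is_superuser)
		return true;			/* case 2: historical, risky */
	if (encoding == UTF8 && is_win32)
		return true;			/* case 3: UTF8 pseudo codepage on win32 */
	return false;
}

int
main(void)
{
	printf("%d\n", encoding_allowed(UTF8, LATIN1, false, false));	/* 0 */
	printf("%d\n", encoding_allowed(UTF8, LATIN1, false, true));	/* 1 */
	return 0;
}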
@ -286,8 +286,8 @@ createdb(const CreatedbStmt *stmt)
(errmsg("encoding %s does not match server's locale %s",
pg_encoding_to_char(encoding),
setlocale(LC_CTYPE, NULL)),
errdetail("The server's LC_CTYPE setting requires encoding %s.",
pg_encoding_to_char(ctype_encoding))));
errdetail("The server's LC_CTYPE setting requires encoding %s.",
pg_encoding_to_char(ctype_encoding))));
/* Resolve default tablespace for new database */
if (dtablespacename && dtablespacename->arg)
@ -313,7 +313,7 @@ createdb(const CreatedbStmt *stmt)
if (dst_deftablespace == GLOBALTABLESPACE_OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("pg_global cannot be used as default tablespace")));
errmsg("pg_global cannot be used as default tablespace")));
/*
* If we are trying to change the default tablespace of the template,
@ -375,12 +375,12 @@ createdb(const CreatedbStmt *stmt)
if (CheckOtherDBBackends(src_dboid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("source database \"%s\" is being accessed by other users",
dbtemplate)));
errmsg("source database \"%s\" is being accessed by other users",
dbtemplate)));
/*
* Select an OID for the new database, checking that it doesn't have
* a filename conflict with anything already existing in the tablespace
* Select an OID for the new database, checking that it doesn't have a
* filename conflict with anything already existing in the tablespace
* directories.
*/
pg_database_rel = heap_open(DatabaseRelationId, RowExclusiveLock);
@ -558,9 +558,9 @@ createdb(const CreatedbStmt *stmt)
/*
* Set flag to update flat database file at commit. Note: this also
* forces synchronous commit, which minimizes the window between
* creation of the database files and committal of the transaction.
* If we crash before committing, we'll have a DB that's taking up
* disk space but is not in pg_database, which is not good.
* creation of the database files and committal of the transaction. If
* we crash before committing, we'll have a DB that's taking up disk
* space but is not in pg_database, which is not good.
*/
database_file_update_needed();
}
@ -721,10 +721,10 @@ dropdb(const char *dbname, bool missing_ok)
/*
* Set flag to update flat database file at commit. Note: this also
* forces synchronous commit, which minimizes the window between
* removal of the database files and committal of the transaction.
* If we crash before committing, we'll have a DB that's gone on disk
* but still there according to pg_database, which is not good.
* forces synchronous commit, which minimizes the window between removal
* of the database files and committal of the transaction. If we crash
* before committing, we'll have a DB that's gone on disk but still there
* according to pg_database, which is not good.
*/
database_file_update_needed();
}

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/discard.c,v 1.1 2007/04/26 16:13:10 neilc Exp $
* $PostgreSQL: pgsql/src/backend/commands/discard.c,v 1.2 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -28,7 +28,7 @@ static void DiscardAll(bool isTopLevel);
* DISCARD { ALL | TEMP | PLANS }
*/
void
DiscardCommand(DiscardStmt *stmt, bool isTopLevel)
DiscardCommand(DiscardStmt * stmt, bool isTopLevel)
{
switch (stmt->target)
{
@ -54,10 +54,10 @@ DiscardAll(bool isTopLevel)
{
/*
* Disallow DISCARD ALL in a transaction block. This is arguably
* inconsistent (we don't make a similar check in the command
* sequence that DISCARD ALL is equivalent to), but the idea is
* to catch mistakes: DISCARD ALL inside a transaction block
* would leave the transaction still uncommitted.
* inconsistent (we don't make a similar check in the command sequence
* that DISCARD ALL is equivalent to), but the idea is to catch mistakes:
* DISCARD ALL inside a transaction block would leave the transaction
* still uncommitted.
*/
PreventTransactionChain(isTopLevel, "DISCARD ALL");

View File

@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.165 2007/08/15 21:39:50 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.166 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -35,6 +35,7 @@
/* Hook for plugins to get control in ExplainOneQuery() */
ExplainOneQuery_hook_type ExplainOneQuery_hook = NULL;
/* Hook for plugins to get control in explain_get_index_name() */
explain_get_index_name_hook_type explain_get_index_name_hook = NULL;
@ -50,10 +51,10 @@ typedef struct ExplainState
} ExplainState;
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
const char *queryString,
ParamListInfo params, TupOutputState *tstate);
const char *queryString,
ParamListInfo params, TupOutputState *tstate);
static void report_triggers(ResultRelInfo *rInfo, bool show_relname,
StringInfo buf);
StringInfo buf);
static double elapsed_time(instr_time *starttime);
static void explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
@ -90,14 +91,14 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
getParamListTypes(params, &param_types, &num_params);
/*
* Run parse analysis and rewrite. Note this also acquires sufficient
* Run parse analysis and rewrite. Note this also acquires sufficient
* locks on the source table(s).
*
* Because the parser and planner tend to scribble on their input, we
* make a preliminary copy of the source querytree. This prevents
* problems in the case that the EXPLAIN is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
* DECLARE CURSOR and PREPARE.) XXX FIXME someday.
* Because the parser and planner tend to scribble on their input, we make
* a preliminary copy of the source querytree. This prevents problems in
* the case that the EXPLAIN is in a portal or plpgsql function and is
* executed repeatedly. (See also the same hack in DECLARE CURSOR and
* PREPARE.) XXX FIXME someday.
*/
rewritten = pg_analyze_and_rewrite((Node *) copyObject(stmt->query),
queryString, param_types, num_params);
@ -215,7 +216,7 @@ ExplainOneUtility(Node *utilityStmt, ExplainStmt *stmt,
* to call it.
*/
void
ExplainOnePlan(PlannedStmt *plannedstmt, ParamListInfo params,
ExplainOnePlan(PlannedStmt * plannedstmt, ParamListInfo params,
ExplainStmt *stmt, TupOutputState *tstate)
{
QueryDesc *queryDesc;
@ -376,8 +377,8 @@ report_triggers(ResultRelInfo *rInfo, bool show_relname, StringInfo buf)
InstrEndLoop(instr);
/*
* We ignore triggers that were never invoked; they likely
* aren't relevant to the current query type.
* We ignore triggers that were never invoked; they likely aren't
* relevant to the current query type.
*/
if (instr->ntuples == 0)
continue;
@ -624,7 +625,7 @@ explain_outNode(StringInfo str,
if (ScanDirectionIsBackward(((IndexScan *) plan)->indexorderdir))
appendStringInfoString(str, " Backward");
appendStringInfo(str, " using %s",
explain_get_index_name(((IndexScan *) plan)->indexid));
explain_get_index_name(((IndexScan *) plan)->indexid));
/* FALL THRU */
case T_SeqScan:
case T_BitmapHeapScan:
@ -1137,7 +1138,7 @@ show_sort_keys(Plan *sortplan, int nkeys, AttrNumber *keycols,
/* Set up deparsing context */
context = deparse_context_for_plan((Node *) outerPlan(sortplan),
NULL, /* Sort has no innerPlan */
NULL, /* Sort has no innerPlan */
es->rtable);
useprefix = list_length(es->rtable) > 1;
@ -1192,7 +1193,7 @@ show_sort_info(SortState *sortstate,
static const char *
explain_get_index_name(Oid indexId)
{
const char *result;
const char *result;
if (explain_get_index_name_hook)
result = (*explain_get_index_name_hook) (indexId);

View File

@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.86 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.87 2007/11/15 21:14:33 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@ -56,7 +56,7 @@
static void AlterFunctionOwner_internal(Relation rel, HeapTuple tup,
Oid newOwnerId);
Oid newOwnerId);
/*
@ -121,8 +121,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (returnType->typmods != NIL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("type modifier cannot be specified for shell type \"%s\"",
typnam)));
errmsg("type modifier cannot be specified for shell type \"%s\"",
typnam)));
/* Otherwise, go ahead and make a shell type */
ereport(NOTICE,
@ -285,7 +285,7 @@ examine_parameter_list(List *parameters, Oid languageOid,
* FUNCTION and ALTER FUNCTION and return it via one of the out
* parameters. Returns true if the passed option was recognized. If
* the out parameter we were going to assign to points to non-NULL,
* raise a duplicate-clause error. (We don't try to detect duplicate
* raise a duplicate-clause error. (We don't try to detect duplicate
* SET parameters though --- if you're redundant, the last one wins.)
*/
static bool
@ -390,7 +390,7 @@ update_proconfig_value(ArrayType *a, List *set_items)
if (valuestr)
a = GUCArrayAdd(a, sstmt->name, valuestr);
else /* RESET */
else /* RESET */
a = GUCArrayDelete(a, sstmt->name);
}
}
@ -1598,9 +1598,9 @@ DropCast(DropCastStmt *stmt)
TypeNameToString(stmt->targettype))));
else
ereport(NOTICE,
(errmsg("cast from type %s to type %s does not exist, skipping",
TypeNameToString(stmt->sourcetype),
TypeNameToString(stmt->targettype))));
(errmsg("cast from type %s to type %s does not exist, skipping",
TypeNameToString(stmt->sourcetype),
TypeNameToString(stmt->targettype))));
return;
}

View File

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.166 2007/09/20 17:56:31 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.167 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -396,10 +396,9 @@ DefineIndex(RangeVar *heapRelation,
}
/*
* Parse AM-specific options, convert to text array form,
* validate. The src_options introduced due to using indexes
* via the "CREATE LIKE INCLUDING INDEXES" statement also need to
* be merged here
* Parse AM-specific options, convert to text array form, validate. The
* src_options introduced due to using indexes via the "CREATE LIKE
* INCLUDING INDEXES" statement also need to be merged here
*/
if (src_options)
reloptions = unflatten_reloptions(src_options);
@ -452,7 +451,7 @@ DefineIndex(RangeVar *heapRelation,
{
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
coloptions, reloptions, primary, isconstraint,
allowSystemTableMods, skip_build, concurrent);
@ -461,18 +460,18 @@ DefineIndex(RangeVar *heapRelation,
/*
* For a concurrent build, we next insert the catalog entry and add
* constraints. We don't build the index just yet; we must first make
* the catalog entry so that the new index is visible to updating
* constraints. We don't build the index just yet; we must first make the
* catalog entry so that the new index is visible to updating
* transactions. That will prevent them from making incompatible HOT
* updates. The new index will be marked not indisready and not
* indisvalid, so that no one else tries to either insert into it or use
* it for queries. We pass skip_build = true to prevent the build.
* it for queries. We pass skip_build = true to prevent the build.
*/
indexRelationId =
index_create(relationId, indexRelationName, indexRelationId,
indexInfo, accessMethodId, tablespaceId, classObjectId,
coloptions, reloptions, primary, isconstraint,
allowSystemTableMods, true, concurrent);
allowSystemTableMods, true, concurrent);
/*
* We must commit our current transaction so that the index becomes
@ -506,15 +505,15 @@ DefineIndex(RangeVar *heapRelation,
* xacts that open the table for writing after this point; they will see
* the new index when they open it.
*
* Note: the reason we use actual lock acquisition here, rather than
* just checking the ProcArray and sleeping, is that deadlock is possible
* if one of the transactions in question is blocked trying to acquire
* an exclusive lock on our table. The lock code will detect deadlock
* and error out properly.
* Note: the reason we use actual lock acquisition here, rather than just
* checking the ProcArray and sleeping, is that deadlock is possible if
* one of the transactions in question is blocked trying to acquire an
* exclusive lock on our table. The lock code will detect deadlock and
* error out properly.
*
* Note: GetLockConflicts() never reports our own xid, hence we need not
* check for that. Also, prepared xacts are not reported, which is
* fine since they certainly aren't going to do anything more.
* check for that. Also, prepared xacts are not reported, which is fine
* since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, ShareLock);
@ -530,15 +529,15 @@ DefineIndex(RangeVar *heapRelation,
* indexes. We have waited out all the existing transactions and any new
* transaction will have the new index in its list, but the index is still
* marked as "not-ready-for-inserts". The index is consulted while
* deciding HOT-safety though. This arrangement ensures that no new HOT
* deciding HOT-safety though. This arrangement ensures that no new HOT
* chains can be created where the new tuple and the old tuple in the
* chain have different index keys.
*
* We now take a new snapshot, and build the index using all tuples that
* are visible in this snapshot. We can be sure that any HOT updates
* to these tuples will be compatible with the index, since any updates
* made by transactions that didn't know about the index are now committed
* or rolled back. Thus, each visible tuple is either the end of its
* are visible in this snapshot. We can be sure that any HOT updates to
* these tuples will be compatible with the index, since any updates made
* by transactions that didn't know about the index are now committed or
* rolled back. Thus, each visible tuple is either the end of its
* HOT-chain or the extension of the chain is HOT-safe for this index.
*/
@ -565,10 +564,9 @@ DefineIndex(RangeVar *heapRelation,
index_close(indexRelation, NoLock);
/*
* Update the pg_index row to mark the index as ready for inserts.
* Once we commit this transaction, any new transactions that
* open the table must insert new entries into the index for insertions
* and non-HOT updates.
* Update the pg_index row to mark the index as ready for inserts. Once we
* commit this transaction, any new transactions that open the table must
* insert new entries into the index for insertions and non-HOT updates.
*/
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
@ -611,8 +609,8 @@ DefineIndex(RangeVar *heapRelation,
/*
* Now take the "reference snapshot" that will be used by validate_index()
* to filter candidate tuples. Beware! There might still be snapshots
* in use that treat some transaction as in-progress that our reference
* to filter candidate tuples. Beware! There might still be snapshots in
* use that treat some transaction as in-progress that our reference
* snapshot treats as committed. If such a recently-committed transaction
* deleted tuples in the table, we will not include them in the index; yet
* those transactions which see the deleting one as still-in-progress will
@ -636,15 +634,15 @@ DefineIndex(RangeVar *heapRelation,
* The index is now valid in the sense that it contains all currently
* interesting tuples. But since it might not contain tuples deleted just
* before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots. Obtain a list of
* VXIDs of such transactions, and wait for them individually.
* transactions that might have older snapshots. Obtain a list of VXIDs
* of such transactions, and wait for them individually.
*
* We can exclude any running transactions that have xmin >= the xmax of
* our reference snapshot, since they are clearly not interested in any
* missing older tuples. Transactions in other DBs aren't a problem
* either, since they'll never even be able to see this index.
* Also, GetCurrentVirtualXIDs never reports our own vxid, so we
* need not check for that.
* either, since they'll never even be able to see this index. Also,
* GetCurrentVirtualXIDs never reports our own vxid, so we need not check
* for that.
*/
old_snapshots = GetCurrentVirtualXIDs(ActiveSnapshot->xmax, false);
@ -681,8 +679,8 @@ DefineIndex(RangeVar *heapRelation,
* relcache entries for the index itself, but we should also send a
* relcache inval on the parent table to force replanning of cached plans.
* Otherwise existing sessions might fail to use the new index where it
* would be useful. (Note that our earlier commits did not create
* reasons to replan; relcache flush on the index itself was sufficient.)
* would be useful. (Note that our earlier commits did not create reasons
* to replan; relcache flush on the index itself was sufficient.)
*/
CacheInvalidateRelcacheByRelid(heaprelid.relId);
@ -837,9 +835,9 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
accessMethodId);
/*
* Set up the per-column options (indoption field). For now, this
* is zero for any un-ordered index, while ordered indexes have DESC
* and NULLS FIRST/LAST options.
* Set up the per-column options (indoption field). For now, this is
* zero for any un-ordered index, while ordered indexes have DESC and
* NULLS FIRST/LAST options.
*/
colOptionP[attn] = 0;
if (amcanorder)

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.55 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.56 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -52,33 +52,33 @@ typedef struct
Oid lefttype; /* lefttype */
Oid righttype; /* righttype */
bool recheck; /* oper recheck flag (unused for proc) */
} OpFamilyMember;
} OpFamilyMember;
static void AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
int maxOpNumber, int maxProcNumber,
List *items);
static void AlterOpFamilyDrop(List *opfamilyname, Oid amoid, Oid opfamilyoid,
int maxOpNumber, int maxProcNumber,
List *items);
int maxOpNumber, int maxProcNumber,
List *items);
static void processTypesSpec(List *args, Oid *lefttype, Oid *righttype);
static void assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid);
static void assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid);
static void addFamilyMember(List **list, OpFamilyMember *member, bool isProc);
static void assignOperTypes(OpFamilyMember * member, Oid amoid, Oid typeoid);
static void assignProcTypes(OpFamilyMember * member, Oid amoid, Oid typeoid);
static void addFamilyMember(List **list, OpFamilyMember * member, bool isProc);
static void storeOperators(List *opfamilyname, Oid amoid,
Oid opfamilyoid, Oid opclassoid,
List *operators, bool isAdd);
Oid opfamilyoid, Oid opclassoid,
List *operators, bool isAdd);
static void storeProcedures(List *opfamilyname, Oid amoid,
Oid opfamilyoid, Oid opclassoid,
List *procedures, bool isAdd);
Oid opfamilyoid, Oid opclassoid,
List *procedures, bool isAdd);
static void dropOperators(List *opfamilyname, Oid amoid, Oid opfamilyoid,
List *operators);
List *operators);
static void dropProcedures(List *opfamilyname, Oid amoid, Oid opfamilyoid,
List *procedures);
List *procedures);
static void AlterOpClassOwner_internal(Relation rel, HeapTuple tuple,
Oid newOwnerId);
static void AlterOpFamilyOwner_internal(Relation rel, HeapTuple tuple,
Oid newOwnerId);
Oid newOwnerId);
/*
@ -111,7 +111,7 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname)
else
{
/* Unqualified opfamily name, so search the search path */
Oid opfID = OpfamilynameGetOpfid(amID, opfname);
Oid opfID = OpfamilynameGetOpfid(amID, opfname);
if (!OidIsValid(opfID))
return NULL;
@ -151,7 +151,7 @@ OpClassCacheLookup(Oid amID, List *opclassname)
else
{
/* Unqualified opclass name, so search the search path */
Oid opcID = OpclassnameGetOpcid(amID, opcname);
Oid opcID = OpclassnameGetOpcid(amID, opcname);
if (!OidIsValid(opcID))
return NULL;
@ -348,8 +348,9 @@ DefineOpClass(CreateOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
NameListToString(stmt->opfamilyname), stmt->amname)));
NameListToString(stmt->opfamilyname), stmt->amname)));
opfamilyoid = HeapTupleGetOid(tup);
/*
* XXX given the superuser check above, there's no need for an
* ownership check here
@ -367,6 +368,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (HeapTupleIsValid(tup))
{
opfamilyoid = HeapTupleGetOid(tup);
/*
* XXX given the superuser check above, there's no need for an
* ownership check here
@ -597,7 +599,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
opclassoid, procedures, false);
/*
* Create dependencies for the opclass proper. Note: we do not create a
* Create dependencies for the opclass proper. Note: we do not create a
* dependency link to the AM, because we don't currently support DROP
* ACCESS METHOD.
*/
@ -644,7 +646,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Define a new index operator family.
*/
void
DefineOpFamily(CreateOpFamilyStmt *stmt)
DefineOpFamily(CreateOpFamilyStmt * stmt)
{
char *opfname; /* name of opfamily we're creating */
Oid amoid, /* our AM's oid */
@ -686,8 +688,8 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
ReleaseSysCache(tup);
/*
* Currently, we require superuser privileges to create an opfamily.
* See comments in DefineOpClass.
* Currently, we require superuser privileges to create an opfamily. See
* comments in DefineOpClass.
*
* XXX re-enable NOT_USED code sections below if you remove this test.
*/
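
As the reflowed comment notes, CREATE OPERATOR FAMILY is superuser-only, and AlterOpFamilyAdd further down insists that operator argument types be spelled out explicitly. A sketch of the corresponding SQL driven through libpq, assuming a superuser session; the family name demo_fam is hypothetical and this is not part of the commit:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* assumed superuser */
	const char *cmds[] = {
		"CREATE OPERATOR FAMILY demo_fam USING btree",

		/*
		 * Argument types are mandatory here; omitting "(int4, int4)" draws
		 * the syntax error reported in AlterOpFamilyAdd above.
		 */
		"ALTER OPERATOR FAMILY demo_fam USING btree "
		"ADD OPERATOR 1 < (int4, int4), "
		"FUNCTION 1 btint4cmp(int4, int4)",
		NULL
	};
	int			i;

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;
	for (i = 0; cmds[i] != NULL; i++)
	{
		PGresult   *res = PQexec(conn, cmds[i]);

		if (PQresultStatus(res) != PGRES_COMMAND_OK)
			fprintf(stderr, "%s: %s", cmds[i], PQerrorMessage(conn));
		PQclear(res);
	}
	PQfinish(conn);
	return 0;
}
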
@ -763,7 +765,7 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
* different code paths.
*/
void
AlterOpFamily(AlterOpFamilyStmt *stmt)
AlterOpFamily(AlterOpFamilyStmt * stmt)
{
Oid amoid, /* our AM's oid */
opfamilyoid; /* oid of opfamily */
@ -876,7 +878,7 @@ AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("operator argument types must be specified in ALTER OPERATOR FAMILY")));
operOid = InvalidOid; /* keep compiler quiet */
operOid = InvalidOid; /* keep compiler quiet */
}
#ifdef NOT_USED
@ -932,7 +934,7 @@ AlterOpFamilyAdd(List *opfamilyname, Oid amoid, Oid opfamilyoid,
case OPCLASS_ITEM_STORAGETYPE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("STORAGE cannot be specified in ALTER OPERATOR FAMILY")));
errmsg("STORAGE cannot be specified in ALTER OPERATOR FAMILY")));
break;
default:
elog(ERROR, "unrecognized item type: %d", item->itemtype);
@ -1057,7 +1059,7 @@ processTypesSpec(List *args, Oid *lefttype, Oid *righttype)
* and do any validity checking we can manage.
*/
static void
assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
assignOperTypes(OpFamilyMember * member, Oid amoid, Oid typeoid)
{
Operator optup;
Form_pg_operator opform;
@ -1098,7 +1100,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
* and do any validity checking we can manage.
*/
static void
assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
assignProcTypes(OpFamilyMember * member, Oid amoid, Oid typeoid)
{
HeapTuple proctup;
Form_pg_proc procform;
@ -1156,10 +1158,10 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
else
{
/*
* The default for GiST and GIN in CREATE OPERATOR CLASS is to use
* the class' opcintype as lefttype and righttype. In CREATE or
* ALTER OPERATOR FAMILY, opcintype isn't available, so make the
* user specify the types.
* The default for GiST and GIN in CREATE OPERATOR CLASS is to use the
* class' opcintype as lefttype and righttype. In CREATE or ALTER
* OPERATOR FAMILY, opcintype isn't available, so make the user
* specify the types.
*/
if (!OidIsValid(member->lefttype))
member->lefttype = typeoid;
@ -1179,7 +1181,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
* duplicated strategy or proc number.
*/
static void
addFamilyMember(List **list, OpFamilyMember *member, bool isProc)
addFamilyMember(List **list, OpFamilyMember * member, bool isProc)
{
ListCell *l;
@ -1560,7 +1562,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
* Deletes an opfamily.
*/
void
RemoveOpFamily(RemoveOpFamilyStmt *stmt)
RemoveOpFamily(RemoveOpFamilyStmt * stmt)
{
Oid amID,
opfID;
@ -1589,11 +1591,11 @@ RemoveOpFamily(RemoveOpFamilyStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator family \"%s\" does not exist for access method \"%s\"",
NameListToString(stmt->opfamilyname), stmt->amname)));
NameListToString(stmt->opfamilyname), stmt->amname)));
else
ereport(NOTICE,
(errmsg("operator family \"%s\" does not exist for access method \"%s\"",
NameListToString(stmt->opfamilyname), stmt->amname)));
NameListToString(stmt->opfamilyname), stmt->amname)));
return;
}
@ -2120,7 +2122,7 @@ AlterOpFamilyOwner(List *name, const char *access_method, Oid newOwnerId)
}
/*
* The first parameter is pg_opfamily, opened and suitably locked. The second
* The first parameter is pg_opfamily, opened and suitably locked. The second
* parameter is a copy of the tuple from pg_opfamily we want to modify.
*/
static void

@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.37 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.38 2007/11/15 21:14:33 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -65,7 +65,7 @@ DefineOperator(List *names, List *parameters)
Oid oprNamespace;
AclResult aclresult;
bool canMerge = false; /* operator merges */
bool canHash = false; /* operator hashes */
bool canHash = false; /* operator hashes */
List *functionName = NIL; /* function for operator */
TypeName *typeName1 = NULL; /* first type name */
TypeName *typeName2 = NULL; /* second type name */

@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.66 2007/10/24 23:27:08 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.67 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,7 +39,7 @@
* utilityStmt field is set.
*/
void
PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
PerformCursorOpen(PlannedStmt * stmt, ParamListInfo params,
const char *queryString, bool isTopLevel)
{
DeclareCursorStmt *cstmt = (DeclareCursorStmt *) stmt->utilityStmt;
@ -102,7 +102,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params,
*
* If the user didn't specify a SCROLL type, allow or disallow scrolling
* based on whether it would require any additional runtime overhead to do
* so. Also, we disallow scrolling for FOR UPDATE cursors.
* so. Also, we disallow scrolling for FOR UPDATE cursors.
*/
portal->cursorOptions = cstmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@ -369,8 +369,8 @@ PersistHoldablePortal(Portal portal)
* to be at, but the tuplestore API doesn't support that. So we start
* at the beginning of the tuplestore and iterate through it until we
* reach where we need to be. FIXME someday? (Fortunately, the
* typical case is that we're supposed to be at or near the start
* of the result set, so this isn't as bad as it sounds.)
* typical case is that we're supposed to be at or near the start of
* the result set, so this isn't as bad as it sounds.)
*/
MemoryContextSwitchTo(portal->holdContext);
@ -378,7 +378,7 @@ PersistHoldablePortal(Portal portal)
{
/* we can handle this case even if posOverflow */
while (tuplestore_advance(portal->holdStore, true))
/* continue */ ;
/* continue */ ;
}
else
{
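
PersistHoldablePortal, patched above, is what runs at transaction end for a WITH HOLD cursor: the remaining rows are dumped into a tuplestore and the read position is restored by scanning forward from the start, which the FIXME comment concedes is crude but usually cheap. A client-side sketch of the case it serves, not part of this commit; the cursor name c is hypothetical:

#include <stdio.h>
#include <libpq-fe.h>

static void
run(PGconn *conn, const char *sql)
{
	PGresult   *res = PQexec(conn, sql);

	if (PQresultStatus(res) != PGRES_COMMAND_OK &&
		PQresultStatus(res) != PGRES_TUPLES_OK)
		fprintf(stderr, "%s: %s", sql, PQerrorMessage(conn));
	PQclear(res);
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;
	run(conn, "BEGIN");
	run(conn, "DECLARE c CURSOR WITH HOLD FOR SELECT generate_series(1, 1000)");
	/* COMMIT is where PersistHoldablePortal copies the result set */
	run(conn, "COMMIT");
	run(conn, "FETCH 10 FROM c");	/* still works after the commit */
	run(conn, "CLOSE c");
	PQfinish(conn);
	return 0;
}
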

@ -10,7 +10,7 @@
* Copyright (c) 2002-2007, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.78 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.79 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -44,7 +44,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(PreparedStatement *pstmt, List *params,
const char *queryString, EState *estate);
const char *queryString, EState *estate);
static Datum build_regtype_array(Oid *param_types, int num_params);
/*
@ -101,8 +101,8 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
* passed in from above us will not be visible to it), allowing
* information about unknown parameters to be deduced from context.
*
* Because parse analysis scribbles on the raw querytree, we must make
* a copy to ensure we have a pristine raw tree to cache. FIXME someday.
* Because parse analysis scribbles on the raw querytree, we must make a
* copy to ensure we have a pristine raw tree to cache. FIXME someday.
*/
query = parse_analyze_varparams((Node *) copyObject(stmt->query),
queryString,
@ -155,7 +155,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
CreateCommandTag((Node *) query),
argtypes,
nargs,
0, /* default cursor options */
0, /* default cursor options */
plan_list,
true);
}
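
PrepareQuery above implements SQL-level PREPARE; the wire protocol reaches the same plan cache through a separate path, and both kinds of statement are listed by the pg_prepared_statements view whose tuple descriptor is built later in this file. A libpq sketch, not part of this commit, with the statement name s1 chosen arbitrarily:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");
	const char *params[1] = {"42"};
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;

	/* protocol-level prepare; listed with from_sql = false */
	res = PQprepare(conn, "s1", "SELECT $1::int4 + 1", 1, NULL);
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		fprintf(stderr, "prepare: %s", PQerrorMessage(conn));
	PQclear(res);

	/* one parameter, passed in text format */
	res = PQexecPrepared(conn, "s1", 1, params, NULL, NULL, 0);
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
		printf("result: %s\n", PQgetvalue(res, 0, 0));
	PQclear(res);

	/* the view whose layout the tupdesc below must match */
	res = PQexec(conn, "SELECT name, from_sql FROM pg_prepared_statements");
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
		printf("%d prepared statement(s)\n", PQntuples(res));
	PQclear(res);

	PQfinish(conn);
	return 0;
}
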
@ -299,8 +299,8 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
if (nparams != num_params)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("wrong number of parameters for prepared statement \"%s\"",
pstmt->stmt_name),
errmsg("wrong number of parameters for prepared statement \"%s\"",
pstmt->stmt_name),
errdetail("Expected %d parameters but got %d.",
num_params, nparams)));
@ -309,8 +309,8 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
return NULL;
/*
* We have to run parse analysis for the expressions. Since the
* parser is not cool about scribbling on its input, copy first.
* We have to run parse analysis for the expressions. Since the parser is
* not cool about scribbling on its input, copy first.
*/
params = (List *) copyObject(params);
@ -334,7 +334,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
errmsg("cannot use aggregate function in EXECUTE parameter")));
errmsg("cannot use aggregate function in EXECUTE parameter")));
given_type_id = exprType(expr);
@ -350,7 +350,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
i + 1,
format_type_be(given_type_id),
format_type_be(expected_type_id)),
errhint("You will need to rewrite or cast the expression.")));
errhint("You will need to rewrite or cast the expression.")));
lfirst(l) = expr;
i++;
@ -734,8 +734,8 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
oldcontext = MemoryContextSwitchTo(per_query_ctx);
/*
* build tupdesc for result tuples. This must match the definition of
* the pg_prepared_statements view in system_views.sql
* build tupdesc for result tuples. This must match the definition of the
* pg_prepared_statements view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(5, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@ -780,11 +780,11 @@ pg_prepared_statement(PG_FUNCTION_ARGS)
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
CStringGetDatum(prep_stmt->plansource->query_string));
CStringGetDatum(prep_stmt->plansource->query_string));
values[2] = TimestampTzGetDatum(prep_stmt->prepare_time);
values[3] = build_regtype_array(prep_stmt->plansource->param_types,
prep_stmt->plansource->num_params);
prep_stmt->plansource->num_params);
values[4] = BoolGetDatum(prep_stmt->from_sql);
tuple = heap_form_tuple(tupdesc, values, nulls);

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.46 2007/06/23 22:12:50 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.47 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -111,17 +111,17 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
/*
* Examine the list of commands embedded in the CREATE SCHEMA command, and
* reorganize them into a sequentially executable order with no forward
* references. Note that the result is still a list of raw parsetrees
* --- we cannot, in general, run parse analysis on one statement until
* we have actually executed the prior ones.
* references. Note that the result is still a list of raw parsetrees ---
* we cannot, in general, run parse analysis on one statement until we
* have actually executed the prior ones.
*/
parsetree_list = transformCreateSchemaStmt(stmt);
/*
* Execute each command contained in the CREATE SCHEMA. Since the
* grammar allows only utility commands in CREATE SCHEMA, there is
* no need to pass them through parse_analyze() or the rewriter;
* we can just hand them straight to ProcessUtility.
* Execute each command contained in the CREATE SCHEMA. Since the grammar
* allows only utility commands in CREATE SCHEMA, there is no need to pass
* them through parse_analyze() or the rewriter; we can just hand them
* straight to ProcessUtility.
*/
foreach(parsetree_item, parsetree_list)
{
@ -131,7 +131,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
ProcessUtility(stmt,
queryString,
NULL,
false, /* not top level */
false, /* not top level */
None_Receiver,
NULL);
/* make sure later steps can see the object created here */
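
The reflowed comments say it directly: transformCreateSchemaStmt first sorts the embedded element commands into a dependency-safe order, and only then is each one handed to ProcessUtility. So a schema element may textually precede something it references. A sketch of that behavior, assuming the reordering handles the table/view pair as described; demo_schema, t, and v are hypothetical names:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;

	/* the view is written before the table it reads from */
	res = PQexec(conn,
				 "CREATE SCHEMA demo_schema"
				 "  CREATE VIEW v AS SELECT a FROM t"
				 "  CREATE TABLE t (a int)");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		fprintf(stderr, "create schema: %s", PQerrorMessage(conn));
	PQclear(res);
	PQfinish(conn);
	return 0;
}
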

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.147 2007/10/25 18:54:03 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.148 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -1145,8 +1145,8 @@ init_params(List *options, bool isInit,
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("START value (%s) cannot be greater than MAXVALUE (%s)",
bufs, bufm)));
errmsg("START value (%s) cannot be greater than MAXVALUE (%s)",
bufs, bufm)));
}
/* CACHE */
@ -1221,7 +1221,7 @@ process_owned_by(Relation seqrel, List *owned_by)
if (seqrel->rd_rel->relowner != tablerel->rd_rel->relowner)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("sequence must have same owner as table it is linked to")));
errmsg("sequence must have same owner as table it is linked to")));
if (RelationGetNamespace(seqrel) != RelationGetNamespace(tablerel))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
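
init_params, touched in the first hunk, is where START is checked against MAXVALUE, and process_owned_by enforces that an OWNED BY sequence share its table's owner and schema. A sketch of the first error path, not part of this commit; bad_seq is a hypothetical name and the command is expected to fail:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;

	/* START 100 exceeds MAXVALUE 10, so init_params raises the error above */
	res = PQexec(conn, "CREATE SEQUENCE bad_seq START 100 MAXVALUE 10");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		printf("rejected as expected: %s", PQerrorMessage(conn));
	PQclear(res);
	PQfinish(conn);
	return 0;
}
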

@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.235 2007/11/11 19:22:48 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.236 2007/11/15 21:14:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -169,7 +169,7 @@ static List *MergeAttributes(List *schema, List *supers, bool istemp,
static void MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel);
static void MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel);
static void add_nonduplicate_constraint(Constraint *cdef,
ConstrCheck *check, int *ncheck);
ConstrCheck *check, int *ncheck);
static bool change_varattnos_walker(Node *node, const AttrNumber *newattno);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
@ -256,7 +256,7 @@ static void ATExecSetRelOptions(Relation rel, List *defList, bool isReset);
static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
char fires_when, bool skip_system);
static void ATExecEnableDisableRule(Relation rel, char *rulename,
char fires_when);
char fires_when);
static void ATExecAddInherit(Relation rel, RangeVar *parent);
static void ATExecDropInherit(Relation rel, RangeVar *parent);
static void copy_relation_data(Relation rel, SMgrRelation dst);
@ -395,6 +395,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (cdef->contype == CONSTR_CHECK)
add_nonduplicate_constraint(cdef, check, &ncheck);
}
/*
* parse_utilcmd.c might have passed some precooked constraints too,
* due to LIKE tab INCLUDING CONSTRAINTS
@ -841,8 +842,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (list_member_oid(parentOids, RelationGetRelid(relation)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" would be inherited from more than once",
parent->relname)));
errmsg("relation \"%s\" would be inherited from more than once",
parent->relname)));
parentOids = lappend_oid(parentOids, RelationGetRelid(relation));
@ -888,8 +889,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
exist_attno = findAttrByName(attributeName, inhSchema);
if (exist_attno > 0)
{
Oid defTypeId;
int32 deftypmod;
Oid defTypeId;
int32 deftypmod;
/*
* Yes, try to merge the two column definitions. They must
@ -1032,8 +1033,10 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (exist_attno > 0)
{
ColumnDef *def;
Oid defTypeId, newTypeId;
int32 deftypmod, newtypmod;
Oid defTypeId,
newTypeId;
int32 deftypmod,
newtypmod;
/*
* Yes, try to merge the two column definitions. They must
@ -1632,8 +1635,8 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
bool relhastriggers;
/*
* Grab an exclusive lock on the target table, index, sequence or
* view, which we will NOT release until end of transaction.
* Grab an exclusive lock on the target table, index, sequence or view,
* which we will NOT release until end of transaction.
*/
targetrelation = relation_open(myrelid, AccessExclusiveLock);
@ -1647,9 +1650,8 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
RelationGetRelationName(targetrelation))));
/*
* For compatibility with prior releases, we don't complain if
* ALTER TABLE or ALTER INDEX is used to rename a sequence or
* view.
* For compatibility with prior releases, we don't complain if ALTER TABLE
* or ALTER INDEX is used to rename a sequence or view.
*/
relkind = targetrelation->rd_rel->relkind;
if (reltype == OBJECT_SEQUENCE && relkind != 'S')
@ -1746,19 +1748,19 @@ renamerel(Oid myrelid, const char *newrelname, ObjectType reltype)
void
AlterTable(AlterTableStmt *stmt)
{
Relation rel = relation_openrv(stmt->relation, AccessExclusiveLock);
Relation rel = relation_openrv(stmt->relation, AccessExclusiveLock);
int expected_refcnt;
/*
* Disallow ALTER TABLE when the current backend has any open reference
* to it besides the one we just got (such as an open cursor or active
* plan); our AccessExclusiveLock doesn't protect us against stomping on
* our own foot, only other people's feet!
* Disallow ALTER TABLE when the current backend has any open reference to
* it besides the one we just got (such as an open cursor or active plan);
* our AccessExclusiveLock doesn't protect us against stomping on our own
* foot, only other people's feet!
*
* Note: the only case known to cause serious trouble is ALTER COLUMN TYPE,
* and some changes are obviously pretty benign, so this could possibly
* be relaxed to only error out for certain types of alterations. But
* the use-case for allowing any of these things is not obvious, so we
* Note: the only case known to cause serious trouble is ALTER COLUMN
* TYPE, and some changes are obviously pretty benign, so this could
* possibly be relaxed to only error out for certain types of alterations.
* But the use-case for allowing any of these things is not obvious, so we
* won't work hard at it for now.
*/
expected_refcnt = rel->rd_isnailed ? 2 : 1;
@ -1784,7 +1786,7 @@ AlterTable(AlterTableStmt *stmt)
void
AlterTableInternal(Oid relid, List *cmds, bool recurse)
{
Relation rel = relation_open(relid, AccessExclusiveLock);
Relation rel = relation_open(relid, AccessExclusiveLock);
ATController(rel, cmds, recurse);
}
@ -2153,54 +2155,54 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd)
ATExecSetRelOptions(rel, (List *) cmd->def, true);
break;
case AT_EnableTrig: /* ENABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_ORIGIN, false);
case AT_EnableTrig: /* ENABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_ORIGIN, false);
break;
case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ALWAYS, false);
case AT_EnableAlwaysTrig: /* ENABLE ALWAYS TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ALWAYS, false);
break;
case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_REPLICA, false);
case AT_EnableReplicaTrig: /* ENABLE REPLICA TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_FIRES_ON_REPLICA, false);
break;
case AT_DisableTrig: /* DISABLE TRIGGER name */
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_DISABLED, false);
ATExecEnableDisableTrigger(rel, cmd->name,
TRIGGER_DISABLED, false);
break;
case AT_EnableTrigAll: /* ENABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, false);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, false);
break;
case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, false);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, false);
break;
case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, true);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_FIRES_ON_ORIGIN, true);
break;
case AT_DisableTrigUser: /* DISABLE TRIGGER USER */
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, true);
ATExecEnableDisableTrigger(rel, NULL,
TRIGGER_DISABLED, true);
break;
case AT_EnableRule: /* ENABLE RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_ORIGIN);
case AT_EnableRule: /* ENABLE RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_ORIGIN);
break;
case AT_EnableAlwaysRule: /* ENABLE ALWAYS RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ALWAYS);
case AT_EnableAlwaysRule: /* ENABLE ALWAYS RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ALWAYS);
break;
case AT_EnableReplicaRule: /* ENABLE REPLICA RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_REPLICA);
case AT_EnableReplicaRule: /* ENABLE REPLICA RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_FIRES_ON_REPLICA);
break;
case AT_DisableRule: /* DISABLE RULE name */
ATExecEnableDisableRule(rel, cmd->name,
RULE_DISABLED);
ATExecEnableDisableRule(rel, cmd->name,
RULE_DISABLED);
break;
case AT_AddInherit:
@ -2303,8 +2305,8 @@ ATRewriteTables(List **wqueue)
/*
* Swap the physical files of the old and new heaps. Since we are
* generating a new heap, we can use RecentXmin for the table's new
* relfrozenxid because we rewrote all the tuples on
* generating a new heap, we can use RecentXmin for the table's
* new relfrozenxid because we rewrote all the tuples on
* ATRewriteTable, so no older Xid remains on the table.
*/
swap_relation_files(tab->relid, OIDNewHeap, RecentXmin);
@ -3011,8 +3013,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
if (HeapTupleIsValid(tuple))
{
Form_pg_attribute childatt = (Form_pg_attribute) GETSTRUCT(tuple);
Oid ctypeId;
int32 ctypmod;
Oid ctypeId;
int32 ctypmod;
/* Okay if child matches by type */
ctypeId = typenameTypeId(NULL, colDef->typename, &ctypmod);
@ -3819,8 +3821,8 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint)
/*
* Currently, we only expect to see CONSTR_CHECK nodes
* arriving here (see the preprocessing done in
* parse_utilcmd.c). Use a switch anyway to make it easier
* to add more code later.
* parse_utilcmd.c). Use a switch anyway to make it easier to
* add more code later.
*/
switch (constr->contype)
{
@ -4030,7 +4032,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* Note that we have to be careful about the difference between the actual
* PK column type and the opclass' declared input type, which might be
* only binary-compatible with it. The declared opcintype is the right
* only binary-compatible with it. The declared opcintype is the right
* thing to probe pg_amop with.
*/
if (numfks != numpks)
@ -4067,10 +4069,10 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Check it's a btree; currently this can never fail since no other
* index AMs support unique indexes. If we ever did have other
* types of unique indexes, we'd need a way to determine which
* operator strategy number is equality. (Is it reasonable to
* insist that every such index AM use btree's number for equality?)
* index AMs support unique indexes. If we ever did have other types
* of unique indexes, we'd need a way to determine which operator
* strategy number is equality. (Is it reasonable to insist that
* every such index AM use btree's number for equality?)
*/
if (amid != BTREE_AM_OID)
elog(ERROR, "only b-tree indexes are supported for foreign keys");
@ -4088,8 +4090,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
eqstrategy, opcintype, opcintype, opfamily);
/*
* Are there equality operators that take exactly the FK type?
* Assume we should look through any domain here.
* Are there equality operators that take exactly the FK type? Assume
* we should look through any domain here.
*/
fktyped = getBaseType(fktype);
@ -4099,21 +4101,21 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
ffeqop = get_opfamily_member(opfamily, fktyped, fktyped,
eqstrategy);
else
ffeqop = InvalidOid; /* keep compiler quiet */
ffeqop = InvalidOid; /* keep compiler quiet */
if (!(OidIsValid(pfeqop) && OidIsValid(ffeqop)))
{
/*
* Otherwise, look for an implicit cast from the FK type to
* the opcintype, and if found, use the primary equality operator.
* Otherwise, look for an implicit cast from the FK type to the
* opcintype, and if found, use the primary equality operator.
* This is a bit tricky because opcintype might be a generic type
* such as ANYARRAY, and so what we have to test is whether the
* two actual column types can be concurrently cast to that type.
* (Otherwise, we'd fail to reject combinations such as int[] and
* point[].)
*/
Oid input_typeids[2];
Oid target_typeids[2];
Oid input_typeids[2];
Oid target_typeids[2];
input_typeids[0] = pktype;
input_typeids[1] = fktype;
@ -5255,10 +5257,10 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
ListCell *list_item;
/*
* We expect that we will get only ALTER TABLE and CREATE INDEX statements.
* Hence, there is no need to pass them through parse_analyze() or the
* rewriter, but instead we need to pass them through parse_utilcmd.c
* to make them ready for execution.
* We expect that we will get only ALTER TABLE and CREATE INDEX
* statements. Hence, there is no need to pass them through
* parse_analyze() or the rewriter, but instead we need to pass them
* through parse_utilcmd.c to make them ready for execution.
*/
raw_parsetree_list = raw_parser(cmd);
querytree_list = NIL;
@ -5272,8 +5274,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue)
cmd));
else if (IsA(stmt, AlterTableStmt))
querytree_list = list_concat(querytree_list,
transformAlterTableStmt((AlterTableStmt *) stmt,
cmd));
transformAlterTableStmt((AlterTableStmt *) stmt,
cmd));
else
querytree_list = lappend(querytree_list, stmt);
}
@ -5528,7 +5530,7 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing)
*/
if (tuple_class->relkind != RELKIND_INDEX)
AlterTypeOwnerInternal(tuple_class->reltype, newOwnerId,
tuple_class->relkind == RELKIND_COMPOSITE_TYPE);
tuple_class->relkind == RELKIND_COMPOSITE_TYPE);
/*
* If we are operating on a table, also change the ownership of any
@ -5983,7 +5985,7 @@ ATExecEnableDisableTrigger(Relation rel, char *trigname,
*/
static void
ATExecEnableDisableRule(Relation rel, char *trigname,
char fires_when)
char fires_when)
{
EnableDisableRule(rel, trigname, fires_when);
}
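
Every branch of the big ATExecCmd switch above lands in ATExecEnableDisableTrigger or the two-line ATExecEnableDisableRule just patched, differing only in the fires_when character and in whether a name or NULL (meaning all, or all user-defined) is passed. The REPLICA and ALWAYS states are new in 8.3 and are interpreted against the session_replication_role setting. A sketch of the SQL shapes involved; t, trg, and r are hypothetical names and this is not part of the commit:

#include <stdio.h>
#include <libpq-fe.h>

static void
run(PGconn *conn, const char *sql)
{
	PGresult   *res = PQexec(conn, sql);

	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		fprintf(stderr, "%s: %s", sql, PQerrorMessage(conn));
	PQclear(res);
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;
	run(conn, "ALTER TABLE t DISABLE TRIGGER USER");	/* NULL name, user triggers only */
	run(conn, "ALTER TABLE t ENABLE ALWAYS TRIGGER trg");	/* fires on origin and replica */
	run(conn, "ALTER TABLE t ENABLE REPLICA RULE r");	/* only when session_replication_role = replica */
	PQfinish(conn);
	return 0;
}
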
@ -6051,8 +6053,8 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
if (inh->inhparent == RelationGetRelid(parent_rel))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_TABLE),
errmsg("relation \"%s\" would be inherited from more than once",
RelationGetRelationName(parent_rel))));
errmsg("relation \"%s\" would be inherited from more than once",
RelationGetRelationName(parent_rel))));
if (inh->inhseqno > inhseqno)
inhseqno = inh->inhseqno;
}
@ -6063,12 +6065,12 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
* (In particular, this disallows making a rel inherit from itself.)
*
* This is not completely bulletproof because of race conditions: in
* multi-level inheritance trees, someone else could concurrently
* be making another inheritance link that closes the loop but does
* not join either of the rels we have locked. Preventing that seems
* to require exclusive locks on the entire inheritance tree, which is
* a cure worse than the disease. find_all_inheritors() will cope with
* circularity anyway, so don't sweat it too much.
* multi-level inheritance trees, someone else could concurrently be
* making another inheritance link that closes the loop but does not join
* either of the rels we have locked. Preventing that seems to require
* exclusive locks on the entire inheritance tree, which is a cure worse
* than the disease. find_all_inheritors() will cope with circularity
* anyway, so don't sweat it too much.
*/
children = find_all_inheritors(RelationGetRelid(child_rel));
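
The comment above is candid about the race, and find_all_inheritors is the backstop: before the pg_inherits row is added, the would-be child's inheritance closure is computed, and if the proposed parent already appears in it the link would close a cycle and is rejected. A sketch of the rejected case, not part of this commit, with hypothetical table names:

#include <stdio.h>
#include <libpq-fe.h>

static void
run(PGconn *conn, const char *sql)
{
	PGresult   *res = PQexec(conn, sql);

	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		fprintf(stderr, "%s: %s", sql, PQerrorMessage(conn));
	PQclear(res);
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");

	if (PQstatus(conn) != CONNECTION_OK)
		return 1;
	run(conn, "CREATE TABLE p (a int)");
	run(conn, "CREATE TABLE c (a int)");
	run(conn, "ALTER TABLE c INHERIT p");	/* fine: no cycle */
	run(conn, "ALTER TABLE p INHERIT c");	/* rejected: p's closure already contains c */
	PQfinish(conn);
	return 0;
}
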
@ -6095,7 +6097,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent)
MergeConstraintsIntoExisting(child_rel, parent_rel);
/*
* OK, it looks valid. Make the catalog entries that show inheritance.
* OK, it looks valid. Make the catalog entries that show inheritance.
*/
StoreCatalogInheritance1(RelationGetRelid(child_rel),
RelationGetRelid(parent_rel),
@ -6189,8 +6191,8 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
if (attribute->attnotnull && !childatt->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("column \"%s\" in child table must be marked NOT NULL",
attributeName)));
errmsg("column \"%s\" in child table must be marked NOT NULL",
attributeName)));
/*
* OK, bump the child column's inheritance count. (If we fail
@ -6345,20 +6347,20 @@ ATExecDropInherit(Relation rel, RangeVar *parent)
bool found = false;
/*
* AccessShareLock on the parent is probably enough, seeing that DROP TABLE
* doesn't lock parent tables at all. We need some lock since we'll be
* inspecting the parent's schema.
* AccessShareLock on the parent is probably enough, seeing that DROP
* TABLE doesn't lock parent tables at all. We need some lock since we'll
* be inspecting the parent's schema.
*/
parent_rel = heap_openrv(parent, AccessShareLock);
/*
* We don't bother to check ownership of the parent table --- ownership
* of the child is presumed enough rights.
* We don't bother to check ownership of the parent table --- ownership of
* the child is presumed enough rights.
*/
/*
* Find and destroy the pg_inherits entry linking the two, or error out
* if there is none.
* Find and destroy the pg_inherits entry linking the two, or error out if
* there is none.
*/
catalogRelation = heap_open(InheritsRelationId, RowExclusiveLock);
ScanKeyInit(&key[0],
@ -6508,9 +6510,9 @@ AlterTableNamespace(RangeVar *relation, const char *newschema)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot move an owned sequence into another schema"),
errdetail("Sequence \"%s\" is linked to table \"%s\".",
RelationGetRelationName(rel),
get_rel_name(tableId))));
errdetail("Sequence \"%s\" is linked to table \"%s\".",
RelationGetRelationName(rel),
get_rel_name(tableId))));
}
break;
case RELKIND_COMPOSITE_TYPE:
