pgindent run over code.

Bruce Momjian 1999-05-25 16:15:34 +00:00
parent 4b04b01aaa
commit 07842084fe
413 changed files with 11723 additions and 10769 deletions

View File

@ -43,8 +43,8 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
int ndim,
*dim;
char *p;
FmgrInfo finf; /*Tobias Gabele Jan 18 1999*/
FmgrInfo finf; /* Tobias Gabele Jan 18 1999 */
/* Sanity checks */
if ((array == (ArrayType *) NULL)
@ -75,9 +75,9 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
/* Lookup the function entry point */
proc_fn = (func_ptr) NULL;
fmgr_info(proc,&finf); /*Tobias Gabele Jan 18 1999*/
proc_fn=finf.fn_addr; /*Tobias Gabele Jan 18 1999*/
pronargs=finf.fn_nargs; /*Tobias Gabele Jan 18 1999*/
fmgr_info(proc, &finf); /* Tobias Gabele Jan 18 1999 */
proc_fn = finf.fn_addr; /* Tobias Gabele Jan 18 1999 */
pronargs = finf.fn_nargs; /* Tobias Gabele Jan 18 1999 */
if ((proc_fn == NULL) || (pronargs != 2))
{
elog(ERROR, "array_iterator: fmgr_info lookup failed for oid %d", proc);
@ -110,38 +110,26 @@ array_iterator(Oid elemtype, Oid proc, int and, ArrayType *array, Datum value)
{
result = (int) (*proc_fn) (p, value);
if (typlen > 0)
{
p += typlen;
}
else
{
p += INTALIGN(*(int32 *) p);
}
}
if (result)
{
if (!and)
{
return (1);
}
}
else
{
if (and)
{
return (0);
}
}
}
if (and && result)
{
return (1);
}
else
{
return (0);
}
}
/*
@ -344,7 +332,7 @@ int32
array_oideq(ArrayType *array, Oid value)
{
return array_iterator((Oid) 26, /* oid */
(Oid) 184, /* oideq */
(Oid) 184, /* oideq */
0, /* logical or */
array, (Datum) value);
}
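A note on the logic the reindented hunk above preserves: array_iterator() applies the looked-up comparison function to each element, and the "and" argument selects the combining rule, so array_oideq() passes 0 (logical OR, succeed on the first match) while the array_all_* variants pass 1 (logical AND, fail on the first mismatch). A minimal standalone sketch of that control flow, with hypothetical names and a plain int predicate standing in for the fmgr call:

#include <stdio.h>

/*
 * Apply pred(elem, value) across an array.  If and_mode is nonzero, every
 * element must match (logical AND, bail out on the first miss); otherwise
 * one match is enough (logical OR, bail out on the first hit).  Same
 * convention as array_iterator()'s "and" argument.
 */
static int
iterate(const int *elems, int n, int and_mode, int value,
        int (*pred) (int, int))
{
    int         result = 0;
    int         i;

    for (i = 0; i < n; i++)
    {
        result = pred(elems[i], value);
        if (result && !and_mode)
            return 1;           /* OR: first match wins */
        if (!result && and_mode)
            return 0;           /* AND: first mismatch loses */
    }
    return (and_mode && result) ? 1 : 0;
}

static int
int_eq(int a, int b)
{
    return a == b;
}

int
main(void)
{
    int         v[] = {1, 2, 3};

    /* "any element equals 2" is true; "all elements equal 2" is false */
    printf("%d %d\n",
           iterate(v, 3, 0, 2, int_eq),
           iterate(v, 3, 1, 2, int_eq));
    return 0;
}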

View File

@ -1,8 +1,7 @@
#ifndef ARRAY_ITERATOR_H
#define ARRAY_ITERATOR_H
static int32
array_iterator(Oid elemtype, Oid proc, int and,
static int32 array_iterator(Oid elemtype, Oid proc, int and,
ArrayType *array, Datum value);
int32 array_texteq(ArrayType *array, char *value);
int32 array_all_texteq(ArrayType *array, char *value);
@ -24,6 +23,7 @@ int32 array_int4lt(ArrayType *array, int4 value);
int32 array_all_int4lt(ArrayType *array, int4 value);
int32 array_int4le(ArrayType *array, int4 value);
int32 array_all_int4le(ArrayType *array, int4 value);
int32 array_oideq(ArrayType *array, Oid value);
int32 array_all_oidne(ArrayType *array, Oid value);
int32 array_oideq(ArrayType *array, Oid value);
int32 array_all_oidne(ArrayType *array, Oid value);
#endif

View File

@ -225,8 +225,8 @@ currentdate()
/*
* Local variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/

View File

@ -4,23 +4,24 @@
#include <postgres.h>
#include <utils/geo_decls.h> /* for Pt */
#include <utils/palloc.h> /* for palloc */
#include <utils/palloc.h> /* for palloc */
/* Earth's radius is in statute miles. */
const EARTH_RADIUS = 3958.747716;
const TWO_PI = 2.0 * M_PI;
const EARTH_RADIUS = 3958.747716;
const TWO_PI = 2.0 * M_PI;
/******************************************************
*
* degtorad - convert degrees to radians
*
* arg: double, angle in degrees
* arg: double, angle in degrees
*
* returns: double, same angle in radians
* returns: double, same angle in radians
******************************************************/
static double
degtorad (double degrees) {
degtorad(double degrees)
{
return (degrees / 360.0) * TWO_PI;
}
@ -30,36 +31,40 @@ degtorad (double degrees) {
* geo_distance - distance between points
*
* args:
* a pair of points - for each point,
* x-coordinate is longitude in degrees west of Greenwich
* y-coordinate is latitude in degrees above equator
* a pair of points - for each point,
* x-coordinate is longitude in degrees west of Greenwich
* y-coordinate is latitude in degrees above equator
*
* returns: double
* distance between the points in miles on earth's surface
* returns: double
* distance between the points in miles on earth's surface
******************************************************/
double *
geo_distance (Point *pt1, Point *pt2) {
geo_distance(Point *pt1, Point *pt2)
{
double long1, lat1, long2, lat2;
double longdiff;
double * resultp = palloc (sizeof(double));
double long1,
lat1,
long2,
lat2;
double longdiff;
double *resultp = palloc(sizeof(double));
/* convert degrees to radians */
long1 = degtorad (pt1->x);
lat1 = degtorad (pt1->y);
long1 = degtorad(pt1->x);
lat1 = degtorad(pt1->y);
long2 = degtorad (pt2->x);
lat2 = degtorad (pt2->y);
long2 = degtorad(pt2->x);
lat2 = degtorad(pt2->y);
/* compute difference in longitudes - want < 180 degrees */
longdiff = fabs (long1 - long2);
longdiff = fabs(long1 - long2);
if (longdiff > M_PI)
longdiff = TWO_PI - longdiff;
* resultp = EARTH_RADIUS * acos
(sin (lat1) * sin (lat2) + cos (lat1) * cos (lat2) * cos (longdiff));
*resultp = EARTH_RADIUS * acos
(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(longdiff));
return resultp;
}
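For reference, the reformatted geo_distance() above is the plain great-circle formula: convert both coordinates to radians, reduce the longitude difference to at most pi, and scale acos(sin lat1 * sin lat2 + cos lat1 * cos lat2 * cos longdiff) by EARTH_RADIUS. A self-contained sketch of the same arithmetic outside the backend (hypothetical wrapper taking plain doubles instead of Point pointers and palloc):

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define EARTH_RADIUS 3958.747716    /* statute miles, as in the module */
#define TWO_PI (2.0 * M_PI)

static double
degtorad(double degrees)
{
    return (degrees / 360.0) * TWO_PI;
}

/*
 * Great-circle distance in miles between (lon1,lat1) and (lon2,lat2),
 * both given in degrees west of Greenwich / degrees above the equator --
 * the same math geo_distance() performs.
 */
static double
distance_miles(double lon1, double lat1, double lon2, double lat2)
{
    double      longdiff = fabs(degtorad(lon1) - degtorad(lon2));

    if (longdiff > M_PI)
        longdiff = TWO_PI - longdiff;

    lat1 = degtorad(lat1);
    lat2 = degtorad(lat2);

    return EARTH_RADIUS *
        acos(sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(longdiff));
}

int
main(void)
{
    /* roughly Boston (71.06 W, 42.36 N) to Seattle (122.33 W, 47.61 N) */
    printf("%.1f miles\n", distance_miles(71.06, 42.36, 122.33, 47.61));
    return 0;
}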

View File

@ -9,7 +9,8 @@
#include <libpq-fe.h>
#include "pginterface.h"
PGresult *attres, *relres;
PGresult *attres,
*relres;
int
main(int argc, char **argv)
@ -55,7 +56,7 @@ main(int argc, char **argv)
");
doquery("FETCH ALL IN c_relations");
relres = get_result();
set_result(attres);
while (fetch(typname, relname, attname) != END_OF_TUPLES)
{
@ -65,17 +66,17 @@ main(int argc, char **argv)
{
unset_result(relres);
if (strcmp(typname, "oid") == 0)
sprintf(query,"\
sprintf(query, "\
DECLARE c_matches BINARY CURSOR FOR \
SELECT count(*)
FROM %s t1, %s t2 \
WHERE t1.%s = t2.oid", relname, relname2, attname);
SELECT count(*) \
FROM % s t1, %s t2 \
WHERE t1.% s = t2.oid ", relname, relname2, attname);
else
sprintf(query,"\
sprintf(query, "\
DECLARE c_matches BINARY CURSOR FOR \
SELECT count(*)
FROM %s t1, %s t2 \
WHERE RegprocToOid(t1.%s) = t2.oid", relname, relname2, attname);
SELECT count(*) \
FROM % s t1, %s t2 \
WHERE RegprocToOid(t1.% s) = t2.oid ", relname, relname2, attname);
doquery(query);
doquery("FETCH ALL IN c_matches");
@ -96,7 +97,7 @@ main(int argc, char **argv)
doquery("CLOSE c_attributes");
PQclear(attres);
unset_result(attres);
doquery("COMMIT WORK");
disconnectdb();

View File

@ -1,18 +1,18 @@
#include "postgres.h"
#include "executor/spi.h"
#include "commands/trigger.h"
#include <ctype.h> /* tolower */
#include <stdio.h> /* debugging */
#include <ctype.h> /* tolower */
#include <stdio.h> /* debugging */
/*
* Trigger function takes 2 arguments:
1. relation in which to store the substrings
2. field to extract substrings from
1. relation in which to store the substrings
2. field to extract substrings from
The relation in which to insert *must* have the following layout:
string varchar(#)
id oid
string varchar(#)
id oid
Example:
@ -24,9 +24,9 @@ create index title_fti_idx on title_fti (string);
create trigger title_fti_trigger after update or insert or delete on product
for each row execute procedure fti(title_fti, title);
^^^^^^^^^
^^^^^^^^^
where to store index in
^^^^^
^^^^^
which column to index
ofcourse don't forget to create an index on title_idx, column string, else
@ -39,8 +39,8 @@ select p.* from product p, title_fti f1, title_fti f2 where
*/
/*
march 4 1998 Changed breakup() to return less substrings. Only breakup
in word parts which are in turn shortened from the start
march 4 1998 Changed breakup() to return less substrings. Only breakup
in word parts which are in turn shortened from the start
of the word (ie. word, ord, rd)
Did allocation of substring buffer outside of breakup()
oct. 5 1997, fixed a bug in string breakup (where there are more nonalpha
@ -62,14 +62,14 @@ select p.* from product p, title_fti f1, title_fti f2 where
*/
HeapTuple fti(void);
char *breakup(char*, char*);
bool is_stopword(char*);
char *breakup(char *, char *);
bool is_stopword(char *);
bool new_tuple = false;
bool new_tuple = false;
/* THIS LIST MUST BE IN SORTED ORDER, A BINARY SEARCH IS USED!!!! */
char *StopWords[] = { /* list of words to skip in indexing */
char *StopWords[] = { /* list of words to skip in indexing */
#ifdef SAMPLE_STOP_WORDS
"no"
"the",
@ -88,7 +88,7 @@ typedef struct
static EPlan *InsertPlans = NULL;
static EPlan *DeletePlans = NULL;
static int nInsertPlans = 0;
static int nDeletePlans = 0;
static int nDeletePlans = 0;
static EPlan *find_plan(char *ident, EPlan ** eplan, int *nplans);
@ -96,28 +96,28 @@ static EPlan *find_plan(char *ident, EPlan ** eplan, int *nplans);
HeapTuple
fti()
{
Trigger *trigger; /* to get trigger name */
int nargs; /* # of arguments */
char **args; /* arguments */
char *relname; /* triggered relation name */
Relation rel; /* triggered relation */
char *indexname; /* name of table for substrings */
Trigger *trigger; /* to get trigger name */
int nargs; /* # of arguments */
char **args; /* arguments */
char *relname; /* triggered relation name */
Relation rel; /* triggered relation */
char *indexname; /* name of table for substrings */
HeapTuple rettuple = NULL;
TupleDesc tupdesc; /* tuple description */
bool isinsert=false;
bool isdelete=false;
TupleDesc tupdesc; /* tuple description */
bool isinsert = false;
bool isdelete = false;
int ret;
char query[8192];
Oid oid;
/*
FILE *debug;
*/
/*
debug = fopen("/dev/xconsole", "w");
fprintf(debug, "FTI: entered function\n");
fflush(debug);
*/
* FILE *debug;
*/
/*
* debug = fopen("/dev/xconsole", "w"); fprintf(debug, "FTI: entered
* function\n"); fflush(debug);
*/
if (!CurrentTriggerData)
elog(ERROR, "Full Text Indexing: triggers are not initialized");
@ -127,47 +127,53 @@ fti()
elog(ERROR, "Full Text Indexing: must be fired AFTER event");
if (TRIGGER_FIRED_BY_INSERT(CurrentTriggerData->tg_event))
isinsert=true;
isinsert = true;
if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
{ isdelete=true;isinsert=true;}
{
isdelete = true;
isinsert = true;
}
if (TRIGGER_FIRED_BY_DELETE(CurrentTriggerData->tg_event))
isdelete=true;
isdelete = true;
trigger = CurrentTriggerData->tg_trigger;
rel = CurrentTriggerData->tg_relation;
relname = SPI_getrelname(rel);
rettuple=CurrentTriggerData->tg_trigtuple;
if (isdelete&&isinsert) /* is an UPDATE */
rettuple=CurrentTriggerData->tg_newtuple;
rettuple = CurrentTriggerData->tg_trigtuple;
if (isdelete && isinsert) /* is an UPDATE */
rettuple = CurrentTriggerData->tg_newtuple;
CurrentTriggerData = NULL; /* invalidate 'normal' calls to this function */
CurrentTriggerData = NULL; /* invalidate 'normal' calls to this
* function */
if ((ret = SPI_connect()) < 0)
elog(ERROR, "Full Text Indexing: SPI_connect failed, returned %d\n", ret);
if ((ret = SPI_connect()) <0)
elog(ERROR,"Full Text Indexing: SPI_connect failed, returned %d\n",ret);
nargs = trigger->tgnargs;
if (nargs != 2)
elog(ERROR, "Full Text Indexing: trigger can only have 2 arguments");
args = trigger->tgargs;
indexname = args[0];
tupdesc = rel->rd_att; /* what the tuple looks like (?) */
tupdesc = rel->rd_att; /* what the tuple looks like (?) */
/* get oid of current tuple, needed by all, so place here */
oid = rettuple->t_data->t_oid;
if (!OidIsValid(oid))
elog(ERROR,"Full Text Indexing: oid of current tuple is NULL");
elog(ERROR, "Full Text Indexing: oid of current tuple is NULL");
if (isdelete) {
void *pplan;
Oid *argtypes;
Datum values[1];
EPlan *plan;
if (isdelete)
{
void *pplan;
Oid *argtypes;
Datum values[1];
EPlan *plan;
sprintf(query, "D%s$%s", args[0], args[1]);
plan = find_plan(query, &DeletePlans, &nDeletePlans);
if (plan->nplans <= 0) {
argtypes = (Oid *)palloc(sizeof(Oid));
if (plan->nplans <= 0)
{
argtypes = (Oid *) palloc(sizeof(Oid));
argtypes[0] = OIDOID;
@ -181,7 +187,7 @@ fti()
elog(ERROR, "Full Text Indexing: SPI_saveplan returned NULL "
"in delete");
plan->splan = (void **)malloc(sizeof(void*));
plan->splan = (void **) malloc(sizeof(void *));
*(plan->splan) = pplan;
plan->nplans = 1;
}
@ -192,26 +198,29 @@ fti()
if (ret != SPI_OK_DELETE)
elog(ERROR, "Full Text Indexing: error executing plan in delete");
}
if (isinsert) {
char *substring, *column;
void *pplan;
Oid *argtypes;
Datum values[2];
int colnum;
if (isinsert)
{
char *substring,
*column;
void *pplan;
Oid *argtypes;
Datum values[2];
int colnum;
struct varlena *data;
EPlan *plan;
EPlan *plan;
sprintf(query, "I%s$%s", args[0], args[1]);
plan = find_plan(query, &InsertPlans, &nInsertPlans);
/* no plan yet, so allocate mem for argtypes */
if (plan->nplans <= 0) {
argtypes = (Oid *)palloc(2*sizeof(Oid));
if (plan->nplans <= 0)
{
argtypes = (Oid *) palloc(2 * sizeof(Oid));
argtypes[0] = VARCHAROID; /*create table t_name
(string varchar, */
argtypes[1] = OIDOID; /* id oid); */
argtypes[0] = VARCHAROID; /* create table t_name (string
* varchar, */
argtypes[1] = OIDOID; /* id oid); */
/* prepare plan to gain speed */
sprintf(query, "INSERT INTO %s (string, id) VALUES ($1, $2)",
@ -226,45 +235,49 @@ fti()
elog(ERROR, "Full Text Indexing: SPI_saveplan returned NULL"
" in insert");
plan->splan = (void **)malloc(sizeof(void*));
plan->splan = (void **) malloc(sizeof(void *));
*(plan->splan) = pplan;
plan->nplans = 1;
}
/* prepare plan for query */
colnum=SPI_fnumber(tupdesc, args[1]);
colnum = SPI_fnumber(tupdesc, args[1]);
if (colnum == SPI_ERROR_NOATTRIBUTE)
elog(ERROR, "Full Text Indexing: column '%s' of '%s' not found",
args[1], args[0]);
/* Get the char* representation of the column with name args[1] */
column = SPI_getvalue(rettuple, tupdesc, colnum);
if (column) { /* make sure we don't try to index NULL's */
char *buff;
char *string = column;
while(*string != '\0') { /* placed 'really' inline. */
*string = tolower(*string); /* some compilers will choke */
string++; /* on 'inline' keyword */
if (column)
{ /* make sure we don't try to index NULL's */
char *buff;
char *string = column;
while (*string != '\0')
{ /* placed 'really' inline. */
*string = tolower(*string); /* some compilers will
* choke */
string++; /* on 'inline' keyword */
}
data = (struct varlena*)palloc(sizeof(int32)+strlen(column)+1);
data = (struct varlena *) palloc(sizeof(int32) + strlen(column) +1);
buff = palloc(strlen(column) + 1);
/* saves lots of calls in while-loop and in breakup()*/
/* saves lots of calls in while-loop and in breakup() */
new_tuple=true;
while ((substring = breakup(column, buff))) {
int l;
new_tuple = true;
while ((substring = breakup(column, buff)))
{
int l;
l = strlen(substring);
data->vl_len = l+sizeof(int32);
data->vl_len = l + sizeof(int32);
memcpy(VARDATA(data), substring, l);
values[0] = PointerGetDatum(data);
values[1] = oid;
ret = SPI_execp(*(plan->splan), values, NULL, 0);
if (ret != SPI_OK_INSERT)
elog(ERROR, "Full Text Indexing: error executing plan "
@ -279,76 +292,83 @@ fti()
return (rettuple);
}
char *breakup(char *string, char *substring)
char *
breakup(char *string, char *substring)
{
static char *last_start;
static char *cur_pos;
if (new_tuple)
{
cur_pos=last_start=&string[strlen(string)-1];
new_tuple=false; /* don't initialize this next time */
cur_pos = last_start = &string[strlen(string) - 1];
new_tuple = false; /* don't initialize this next time */
}
while (cur_pos > string) /* don't read before start of 'string' */
while (cur_pos > string) /* don't read before start of 'string' */
{
/* skip pieces at the end of a string that are not
alfa-numeric (ie. 'string$%^&', last_start first points to
'&', and after this to 'g' */
if (!isalnum((int)*last_start)) {
while (!isalnum((int)*last_start) &&
/*
* skip pieces at the end of a string that are not alfa-numeric
* (ie. 'string$%^&', last_start first points to '&', and after
* this to 'g'
*/
if (!isalnum((int) *last_start))
{
while (!isalnum((int) *last_start) &&
last_start > string)
last_start--;
cur_pos=last_start;
cur_pos = last_start;
}
cur_pos--; /* substrings are at minimum 2 characters long */
cur_pos--; /* substrings are at minimum 2 characters
* long */
if (isalnum((int)*cur_pos))
if (isalnum((int) *cur_pos))
{
/* Houston, we have a substring! :) */
memcpy(substring, cur_pos, last_start - cur_pos + 1);
substring[last_start-cur_pos+1]='\0';
if (!is_stopword(substring)) return substring;
substring[last_start - cur_pos + 1] = '\0';
if (!is_stopword(substring))
return substring;
}
else
{
last_start=cur_pos-1;
last_start = cur_pos - 1;
cur_pos = last_start;
}
}
return NULL; /* we've processed all of 'string' */
return NULL; /* we've processed all of 'string' */
}
/* copied from src/backend/parser/keywords.c and adjusted for our situation*/
bool
is_stopword(char *text)
{
char **StopLow; /* for list of stop-words */
char **StopHigh;
char **StopMiddle;
unsigned int difference;
char **StopLow; /* for list of stop-words */
char **StopHigh;
char **StopMiddle;
unsigned int difference;
StopLow = &StopWords[0]; /* initialize stuff for binary search */
StopLow = &StopWords[0]; /* initialize stuff for binary search */
StopHigh = endof(StopWords);
if (lengthof(StopWords) == 0)
return false;
while (StopLow <= StopHigh)
{
StopMiddle = StopLow + (StopHigh - StopLow) / 2;
difference = strcmp(*StopMiddle, text);
if (difference == 0)
return (true);
else if (difference < 0)
StopLow = StopMiddle + 1;
else
StopHigh = StopMiddle - 1;
}
return (false);
while (StopLow <= StopHigh)
{
StopMiddle = StopLow + (StopHigh - StopLow) / 2;
difference = strcmp(*StopMiddle, text);
if (difference == 0)
return (true);
else if (difference < 0)
StopLow = StopMiddle + 1;
else
StopHigh = StopMiddle - 1;
}
return (false);
}
/* for caching of query plans, stolen from contrib/spi/\*.c */

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for ISBNs.
*
* $Id: isbn.c,v 1.1 1998/08/17 03:35:04 scrappy Exp $
* $Id: isbn.c,v 1.2 1999/05/25 16:05:40 momjian Exp $
*/
#include <stdio.h>
@ -16,8 +16,8 @@
typedef struct isbn
{
char num[13];
char pad[3];
char num[13];
char pad[3];
} isbn;
/*
@ -50,13 +50,15 @@ isbn_in(char *str)
char *cp;
int count;
if (strlen(str) != 13) {
if (strlen(str) != 13)
{
elog(ERROR, "isbn_in: invalid ISBN \"%s\"", str);
return (NULL);
}
if (isbn_sum(str) != 0) {
if (isbn_sum(str) != 0)
{
elog(ERROR, "isbn_in: purported ISBN \"%s\" failed checksum",
str);
str);
return (NULL);
}
@ -84,28 +86,40 @@ isbn_in(char *str)
int4
isbn_sum(char *str)
{
int4 sum = 0, dashes = 0, val;
int i;
int4 sum = 0,
dashes = 0,
val;
int i;
for (i = 0; str[i] && i < 13; i++) {
switch(str[i]) {
case '-':
if (++dashes > 3)
for (i = 0; str[i] && i < 13; i++)
{
switch (str[i])
{
case '-':
if (++dashes > 3)
return 12;
continue;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
val = str[i] - '0';
break;
case 'X':
case 'x':
val = 10;
break;
default:
return 12;
continue;
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9':
val = str[i] - '0';
break;
case 'X': case 'x':
val = 10;
break;
default:
return 12;
}
sum += val * (i + 1 - dashes);
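The reindented switch above is the ISBN-10 weight computation: each digit contributes its value times its 1-based position among the digits (dashes excluded), 'X' counts as ten, and a well-formed ISBN makes the total divisible by 11. A small standalone sketch of that sum for one hard-coded ISBN (hypothetical helper; the length and dash-count checks enforced by isbn_in()/isbn_sum() are omitted):

#include <stdio.h>

/*
 * Weighted ISBN-10 sum: the n-th digit (dashes skipped) is weighted by n,
 * 'X' counts as 10.  A valid ISBN yields a multiple of 11.
 */
static int
isbn_weighted_sum(const char *str)
{
    int         sum = 0;
    int         pos = 0;        /* 1-based position among digits */
    int         i;

    for (i = 0; str[i]; i++)
    {
        int         val;

        if (str[i] == '-')
            continue;
        if (str[i] == 'X' || str[i] == 'x')
            val = 10;
        else if (str[i] >= '0' && str[i] <= '9')
            val = str[i] - '0';
        else
            return -1;          /* not a digit: reject */
        sum += val * ++pos;
    }
    return sum;
}

int
main(void)
{
    const char *isbn = "0-13-110362-8"; /* K&R, 2nd edition */
    int         sum = isbn_weighted_sum(isbn);

    printf("%s: sum=%d, %s\n", isbn, sum,
           (sum >= 0 && sum % 11 == 0) ? "valid" : "invalid");
    return 0;
}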

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for ISSNs.
*
* $Id: issn.c,v 1.1 1998/08/17 03:35:05 scrappy Exp $
* $Id: issn.c,v 1.2 1999/05/25 16:05:42 momjian Exp $
*/
#include <stdio.h>
@ -16,8 +16,8 @@
typedef struct issn
{
char num[9];
char pad[7];
char num[9];
char pad[7];
} issn;
/*
@ -50,13 +50,15 @@ issn_in(char *str)
char *cp;
int count;
if (strlen(str) != 9) {
if (strlen(str) != 9)
{
elog(ERROR, "issn_in: invalid ISSN \"%s\"", str);
return (NULL);
}
if (issn_sum(str) != 0) {
if (issn_sum(str) != 0)
{
elog(ERROR, "issn_in: purported ISSN \"%s\" failed checksum",
str);
str);
return (NULL);
}
@ -75,28 +77,40 @@ issn_in(char *str)
int4
issn_sum(char *str)
{
int4 sum = 0, dashes = 0, val;
int i;
int4 sum = 0,
dashes = 0,
val;
int i;
for (i = 0; str[i] && i < 9; i++) {
switch(str[i]) {
case '-':
if (++dashes > 1)
for (i = 0; str[i] && i < 9; i++)
{
switch (str[i])
{
case '-':
if (++dashes > 1)
return 12;
continue;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
val = str[i] - '0';
break;
case 'X':
case 'x':
val = 10;
break;
default:
return 12;
continue;
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9':
val = str[i] - '0';
break;
case 'X': case 'x':
val = 10;
break;
default:
return 12;
}
sum += val * (8 - (i - dashes));

View File

@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for managed LargeObjects.
*
* $Id: lo.c,v 1.1 1998/06/16 07:07:11 momjian Exp $
* $Id: lo.c,v 1.2 1999/05/25 16:05:45 momjian Exp $
*
*/
@ -33,11 +33,11 @@ typedef Oid Blob;
* Various forward declarations:
*/
Blob *lo_in(char *str); /* Create from String */
char *lo_out(Blob * addr); /* Output oid as String */
Oid lo_oid(Blob * addr); /* Return oid as an oid */
Blob *lo(Oid oid); /* Return Blob based on oid */
HeapTuple lo_manage(void); /* Trigger handler */
Blob *lo_in(char *str); /* Create from String */
char *lo_out(Blob * addr);/* Output oid as String */
Oid lo_oid(Blob * addr);/* Return oid as an oid */
Blob *lo(Oid oid); /* Return Blob based on oid */
HeapTuple lo_manage(void); /* Trigger handler */
/*
* This creates a large object, and set's its OID to the value in the
@ -49,45 +49,46 @@ HeapTuple lo_manage(void); /* Trigger handler */
Blob *
lo_in(char *str)
{
Blob *result;
Oid oid;
int count;
if (strlen(str) > 0)
Blob *result;
Oid oid;
int count;
if (strlen(str) > 0)
{
count = sscanf(str, "%d", &oid);
if (count < 1)
count = sscanf(str, "%d", &oid);
if (count < 1)
{
elog(ERROR, "lo_in: error in parsing \"%s\"", str);
return (NULL);
elog(ERROR, "lo_in: error in parsing \"%s\"", str);
return (NULL);
}
if(oid < 0)
if (oid < 0)
{
elog(ERROR, "lo_in: illegal oid \"%s\"", str);
return (NULL);
elog(ERROR, "lo_in: illegal oid \"%s\"", str);
return (NULL);
}
}
else
else
{
/*
* There is no Oid passed, so create a new one
*/
oid = lo_creat(INV_READ|INV_WRITE);
if(oid == InvalidOid)
/*
* There is no Oid passed, so create a new one
*/
oid = lo_creat(INV_READ | INV_WRITE);
if (oid == InvalidOid)
{
elog(ERROR,"lo_in: InvalidOid returned from lo_creat");
return (NULL);
elog(ERROR, "lo_in: InvalidOid returned from lo_creat");
return (NULL);
}
}
result = (Blob *) palloc(sizeof(Blob));
*result = oid;
return (result);
result = (Blob *) palloc(sizeof(Blob));
*result = oid;
return (result);
}
/*
@ -96,42 +97,43 @@ lo_in(char *str)
char *
lo_out(Blob * addr)
{
char *result;
if (addr == NULL)
return (NULL);
result = (char *) palloc(32);
sprintf(result,"%d",*addr);
return (result);
char *result;
if (addr == NULL)
return (NULL);
result = (char *) palloc(32);
sprintf(result, "%d", *addr);
return (result);
}
/*
* This function converts Blob to oid.
*
* eg: select lo_export(raster::oid,'/path/file') from table;
*
*
*/
Oid
lo_oid(Blob * addr)
{
if(addr == NULL)
return InvalidOid;
return (Oid)(*addr);
if (addr == NULL)
return InvalidOid;
return (Oid) (*addr);
}
/*
* This function is used so we can convert oid's to lo's
*
* ie: insert into table values(lo_import('/path/file')::lo);
* ie: insert into table values(lo_import('/path/file')::lo);
*
*/
Blob *
lo(Oid oid)
{
Blob *result = (Blob *) palloc(sizeof(Blob));
*result = oid;
return (result);
Blob *result = (Blob *) palloc(sizeof(Blob));
*result = oid;
return (result);
}
/*
@ -140,74 +142,77 @@ lo(Oid oid)
HeapTuple
lo_manage(void)
{
int attnum; /* attribute number to monitor */
char **args; /* Args containing attr name */
TupleDesc tupdesc; /* Tuple Descriptor */
HeapTuple rettuple; /* Tuple to be returned */
bool isdelete; /* are we deleting? */
HeapTuple newtuple=NULL; /* The new value for tuple */
HeapTuple trigtuple; /* The original value of tuple */
if (!CurrentTriggerData)
elog(ERROR, "lo: triggers are not initialized");
/*
* Fetch some values from CurrentTriggerData
*/
newtuple = CurrentTriggerData->tg_newtuple;
trigtuple = CurrentTriggerData->tg_trigtuple;
tupdesc = CurrentTriggerData->tg_relation->rd_att;
args = CurrentTriggerData->tg_trigger->tgargs;
/* tuple to return to Executor */
if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
rettuple = newtuple;
else
rettuple = trigtuple;
/* Are we deleting the row? */
isdelete = TRIGGER_FIRED_BY_DELETE(CurrentTriggerData->tg_event);
/* Were done with it */
CurrentTriggerData = NULL;
/* Get the column were interested in */
attnum = SPI_fnumber(tupdesc,args[0]);
/*
* Handle updates
*
* Here, if the value of the monitored attribute changes, then the
* large object associated with the original value is unlinked.
*/
if(newtuple!=NULL) {
char *orig = SPI_getvalue(trigtuple,tupdesc,attnum);
char *newv = SPI_getvalue(newtuple,tupdesc,attnum);
if((orig != newv && (orig==NULL || newv==NULL)) || (orig!=NULL && newv!=NULL && strcmp(orig,newv)))
lo_unlink(atoi(orig));
if(newv)
pfree(newv);
if(orig)
pfree(orig);
}
/*
* Handle deleting of rows
*
* Here, we unlink the large object associated with the managed attribute
*
*/
if(isdelete) {
char *orig = SPI_getvalue(trigtuple,tupdesc,attnum);
if(orig != NULL) {
lo_unlink(atoi(orig));
pfree(orig);
int attnum; /* attribute number to monitor */
char **args; /* Args containing attr name */
TupleDesc tupdesc; /* Tuple Descriptor */
HeapTuple rettuple; /* Tuple to be returned */
bool isdelete; /* are we deleting? */
HeapTuple newtuple = NULL;/* The new value for tuple */
HeapTuple trigtuple; /* The original value of tuple */
if (!CurrentTriggerData)
elog(ERROR, "lo: triggers are not initialized");
/*
* Fetch some values from CurrentTriggerData
*/
newtuple = CurrentTriggerData->tg_newtuple;
trigtuple = CurrentTriggerData->tg_trigtuple;
tupdesc = CurrentTriggerData->tg_relation->rd_att;
args = CurrentTriggerData->tg_trigger->tgargs;
/* tuple to return to Executor */
if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
rettuple = newtuple;
else
rettuple = trigtuple;
/* Are we deleting the row? */
isdelete = TRIGGER_FIRED_BY_DELETE(CurrentTriggerData->tg_event);
/* Were done with it */
CurrentTriggerData = NULL;
/* Get the column were interested in */
attnum = SPI_fnumber(tupdesc, args[0]);
/*
* Handle updates
*
* Here, if the value of the monitored attribute changes, then the large
* object associated with the original value is unlinked.
*/
if (newtuple != NULL)
{
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
char *newv = SPI_getvalue(newtuple, tupdesc, attnum);
if ((orig != newv && (orig == NULL || newv == NULL)) || (orig != NULL && newv != NULL && strcmp(orig, newv)))
lo_unlink(atoi(orig));
if (newv)
pfree(newv);
if (orig)
pfree(orig);
}
}
return (rettuple);
/*
* Handle deleting of rows
*
* Here, we unlink the large object associated with the managed attribute
*
*/
if (isdelete)
{
char *orig = SPI_getvalue(trigtuple, tupdesc, attnum);
if (orig != NULL)
{
lo_unlink(atoi(orig));
pfree(orig);
}
}
return (rettuple);
}

View File

@ -6,270 +6,360 @@
#define HNDMAX 10
PGconn *PGh[HNDMAX] = {
NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL
PGconn *PGh[HNDMAX] = {
NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL
};
#define E_NOHANDLERS 0
char *msqlErrors[] = {
"Out of database handlers."
char *msqlErrors[] = {
"Out of database handlers."
};
char msqlErrMsg[BUFSIZ], *tfrom = "dunno";
PGresult *queryres = NULL;
char msqlErrMsg[BUFSIZ],
*tfrom = "dunno";
PGresult *queryres = NULL;
int msqlConnect (char *host) {
int count;
for (count = 0; count < HNDMAX; count++)
if (PGh[count] == NULL) break;
if (count == HNDMAX) {
strncpy(msqlErrMsg, msqlErrors[E_NOHANDLERS], BUFSIZ);
return -1;
}
int
msqlConnect(char *host)
{
int count;
PGh[count] = malloc(sizeof (PGconn));
PGh[count]->pghost = host ? strdup(host) : NULL;
return count;
for (count = 0; count < HNDMAX; count++)
if (PGh[count] == NULL)
break;
if (count == HNDMAX)
{
strncpy(msqlErrMsg, msqlErrors[E_NOHANDLERS], BUFSIZ);
return -1;
}
PGh[count] = malloc(sizeof(PGconn));
PGh[count]->pghost = host ? strdup(host) : NULL;
return count;
}
int msqlSelectDB(int handle, char *dbname) {
char *options = calloc(1, BUFSIZ);
char *e = getenv("PG_OPTIONS");
if (e == NULL)
e = "";
if (PGh[handle]->pghost) {
strcat(options, "host=");
strncat(options, PGh[handle]->pghost, BUFSIZ);
strncat(options, " ", BUFSIZ);
free(PGh[handle]->pghost);
PGh[handle]->pghost = NULL;
}
strncat(options, "dbname=", BUFSIZ);
strncat(options, dbname, BUFSIZ);
strncat(options, " ", BUFSIZ);
strncat(options, e, BUFSIZ);
free(PGh[handle]);
PGh[handle] = PQconnectdb(options);
free(options);
strncpy(msqlErrMsg, PQerrorMessage(PGh[handle]), BUFSIZ);
return (PQstatus(PGh[handle]) == CONNECTION_BAD ? -1 : 0);
int
msqlSelectDB(int handle, char *dbname)
{
char *options = calloc(1, BUFSIZ);
char *e = getenv("PG_OPTIONS");
if (e == NULL)
e = "";
if (PGh[handle]->pghost)
{
strcat(options, "host=");
strncat(options, PGh[handle]->pghost, BUFSIZ);
strncat(options, " ", BUFSIZ);
free(PGh[handle]->pghost);
PGh[handle]->pghost = NULL;
}
strncat(options, "dbname=", BUFSIZ);
strncat(options, dbname, BUFSIZ);
strncat(options, " ", BUFSIZ);
strncat(options, e, BUFSIZ);
free(PGh[handle]);
PGh[handle] = PQconnectdb(options);
free(options);
strncpy(msqlErrMsg, PQerrorMessage(PGh[handle]), BUFSIZ);
return (PQstatus(PGh[handle]) == CONNECTION_BAD ? -1 : 0);
}
int msqlQuery(int handle, char *query) {
char *tq = strdup(query);
char *p = tq;
PGresult *res;
PGconn *conn = PGh[handle];
ExecStatusType rcode;
res = PQexec(conn, p);
int
msqlQuery(int handle, char *query)
{
char *tq = strdup(query);
char *p = tq;
PGresult *res;
PGconn *conn = PGh[handle];
ExecStatusType rcode;
rcode = PQresultStatus(res);
if (rcode == PGRES_TUPLES_OK) {
queryres = res;
return PQntuples(res);
} else if (rcode == PGRES_FATAL_ERROR || rcode == PGRES_NONFATAL_ERROR) {
PQclear(res);
queryres = NULL;
return -1;
} else {
PQclear(res);
queryres = NULL;
return 0;
}
res = PQexec(conn, p);
rcode = PQresultStatus(res);
if (rcode == PGRES_TUPLES_OK)
{
queryres = res;
return PQntuples(res);
}
else if (rcode == PGRES_FATAL_ERROR || rcode == PGRES_NONFATAL_ERROR)
{
PQclear(res);
queryres = NULL;
return -1;
}
else
{
PQclear(res);
queryres = NULL;
return 0;
}
}
int msqlCreateDB (int a, char*b) {
char tbuf[BUFSIZ];
sprintf(tbuf, "create database %s", b);
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
int
msqlCreateDB(int a, char *b)
{
char tbuf[BUFSIZ];
sprintf(tbuf, "create database %s", b);
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
}
int msqlDropDB (int a, char* b) {
char tbuf[BUFSIZ];
sprintf(tbuf, "drop database %s", b);
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
int
msqlDropDB(int a, char *b)
{
char tbuf[BUFSIZ];
sprintf(tbuf, "drop database %s", b);
return msqlQuery(a, tbuf) >= 0 ? 0 : -1;
}
int msqlShutdown(int a) {
int
msqlShutdown(int a)
{
}
int msqlGetProtoInfo(void) {
int
msqlGetProtoInfo(void)
{
}
int msqlReloadAcls(int a) {
int
msqlReloadAcls(int a)
{
}
char *msqlGetServerInfo(void) {
char *
msqlGetServerInfo(void)
{
}
char *msqlGetHostInfo(void) {
char *
msqlGetHostInfo(void)
{
}
char *msqlUnixTimeToDate(time_t date) {
char *
msqlUnixTimeToDate(time_t date)
{
}
char *msqlUnixTimeToTime(time_t time) {
char *
msqlUnixTimeToTime(time_t time)
{
}
void msqlClose(int a) {
PQfinish(PGh[a]);
PGh[a] = NULL;
if (queryres) {
free(queryres);
queryres = NULL;
}
void
msqlClose(int a)
{
PQfinish(PGh[a]);
PGh[a] = NULL;
if (queryres)
{
free(queryres);
queryres = NULL;
}
}
void msqlDataSeek(m_result *result, int count) {
int c;
result->cursor = result->queryData;
for (c = 1; c < count; c++)
if (result->cursor->next)
result->cursor = result->cursor->next;
void
msqlDataSeek(m_result * result, int count)
{
int c;
result->cursor = result->queryData;
for (c = 1; c < count; c++)
if (result->cursor->next)
result->cursor = result->cursor->next;
}
void msqlFieldSeek(m_result *result, int count) {
int c;
result->fieldCursor = result->fieldData;
for (c = 1; c < count; c++)
if (result->fieldCursor->next)
result->fieldCursor = result->fieldCursor->next;
void
msqlFieldSeek(m_result * result, int count)
{
int c;
result->fieldCursor = result->fieldData;
for (c = 1; c < count; c++)
if (result->fieldCursor->next)
result->fieldCursor = result->fieldCursor->next;
}
void msqlFreeResult(m_result *result) {
if (result) {
/* Clears fields */
free(result->fieldData);
result->cursor = result->queryData;
while (result->cursor) {
int c;
m_row m = result->cursor->data;
for (c = 0; m[c]; c++)
free(m[c]);
void
msqlFreeResult(m_result * result)
{
if (result)
{
/* Clears fields */
free(result->fieldData);
result->cursor = result->queryData;
while (result->cursor)
{
int c;
m_row m = result->cursor->data;
result->cursor = result->cursor->next;
}
free(result->queryData);
free(result);
}
for (c = 0; m[c]; c++)
free(m[c]);
result->cursor = result->cursor->next;
}
free(result->queryData);
free(result);
}
}
m_row msqlFetchRow(m_result *row) {
m_data *r = row->cursor;
if (r) {
row->cursor = row->cursor->next;
return (m_row)r->data;
}
return (m_row)NULL;
m_row
msqlFetchRow(m_result * row)
{
m_data *r = row->cursor;
if (r)
{
row->cursor = row->cursor->next;
return (m_row) r->data;
}
return (m_row) NULL;
}
m_seq *msqlGetSequenceInfo(int a, char *b) {
m_seq *
msqlGetSequenceInfo(int a, char *b)
{
}
m_field *msqlFetchField (m_result *mr) {
m_field *m = (m_field*)mr->fieldCursor;
if (m) {
mr->fieldCursor = mr->fieldCursor->next;
return m;
}
return NULL;
m_field *
msqlFetchField(m_result * mr)
{
m_field *m = (m_field *) mr->fieldCursor;
if (m)
{
mr->fieldCursor = mr->fieldCursor->next;
return m;
}
return NULL;
}
m_result *msqlListDBs(int a) {
m_result *m;
if (msqlQuery(a, "select datname from pg_database") > 0) {
m = msqlStoreResult();
return m;
} else return NULL;
m_result *
msqlListDBs(int a)
{
m_result *m;
if (msqlQuery(a, "select datname from pg_database") > 0)
{
m = msqlStoreResult();
return m;
}
else
return NULL;
}
m_result *msqlListTables(int a) {
m_result *m;
char tbuf[BUFSIZ];
sprintf(tbuf, "select relname from pg_class where relkind='r' and relowner=%d", getuid());
if (msqlQuery(a, tbuf) > 0) {
m = msqlStoreResult();
return m;
} else return NULL;
m_result *
msqlListTables(int a)
{
m_result *m;
char tbuf[BUFSIZ];
sprintf(tbuf, "select relname from pg_class where relkind='r' and relowner=%d", getuid());
if (msqlQuery(a, tbuf) > 0)
{
m = msqlStoreResult();
return m;
}
else
return NULL;
}
m_result *msqlListFields(int a, char *b) {
m_result *
msqlListFields(int a, char *b)
{
}
m_result *msqlListIndex(int a, char *b, char *c) {
m_result *m;
char tbuf[BUFSIZ];
sprintf(tbuf, "select relname from pg_class where relkind='i' and relowner=%d", getuid());
if (msqlQuery(a, tbuf) > 0) {
m = msqlStoreResult();
return m;
} else return NULL;
m_result *
msqlListIndex(int a, char *b, char *c)
{
m_result *m;
char tbuf[BUFSIZ];
sprintf(tbuf, "select relname from pg_class where relkind='i' and relowner=%d", getuid());
if (msqlQuery(a, tbuf) > 0)
{
m = msqlStoreResult();
return m;
}
else
return NULL;
}
m_result *msqlStoreResult(void) {
if (queryres) {
m_result *mr = malloc(sizeof(m_result));
m_fdata *mf;
m_data *md;
int count;
mr->queryData = mr->cursor = NULL;
mr->numRows = PQntuples(queryres);
mr->numFields = PQnfields(queryres);
mf = calloc(PQnfields(queryres), sizeof(m_fdata));
for (count = 0; count < PQnfields(queryres); count++) {
(m_fdata*)(mf+count)->field.name = strdup(PQfname(queryres, count));
(m_fdata*)(mf+count)->field.table = tfrom;
(m_fdata*)(mf+count)->field.type = CHAR_TYPE;
(m_fdata*)(mf+count)->field.length = PQfsize(queryres, count);
(m_fdata*)(mf+count)->next = (m_fdata*)(mf+count+1);
}
(m_fdata*)(mf+count-1)->next = NULL;
md = calloc(PQntuples(queryres), sizeof(m_data));
for (count = 0; count < PQntuples(queryres); count++) {
m_row rows = calloc(PQnfields(queryres)*sizeof(m_row)+1, 1);
int c;
for (c = 0; c < PQnfields(queryres); c++) {
rows[c] = strdup(PQgetvalue(queryres, count, c));
}
(m_data*)(md+count)->data = rows;
(m_data*)(md+count)->width = PQnfields(queryres);
(m_data*)(md+count)->next = (m_data*)(md+count+1);
}
(m_data*)(md+count-1)->next = NULL;
mr->queryData = mr->cursor = md;
mr->fieldCursor = mr->fieldData = mf;
m_result *
msqlStoreResult(void)
{
if (queryres)
{
m_result *mr = malloc(sizeof(m_result));
m_fdata *mf;
m_data *md;
int count;
return mr;
} else return NULL;
mr->queryData = mr->cursor = NULL;
mr->numRows = PQntuples(queryres);
mr->numFields = PQnfields(queryres);
mf = calloc(PQnfields(queryres), sizeof(m_fdata));
for (count = 0; count < PQnfields(queryres); count++)
{
(m_fdata *) (mf + count)->field.name = strdup(PQfname(queryres, count));
(m_fdata *) (mf + count)->field.table = tfrom;
(m_fdata *) (mf + count)->field.type = CHAR_TYPE;
(m_fdata *) (mf + count)->field.length = PQfsize(queryres, count);
(m_fdata *) (mf + count)->next = (m_fdata *) (mf + count + 1);
}
(m_fdata *) (mf + count - 1)->next = NULL;
md = calloc(PQntuples(queryres), sizeof(m_data));
for (count = 0; count < PQntuples(queryres); count++)
{
m_row rows = calloc(PQnfields(queryres) * sizeof(m_row) + 1, 1);
int c;
for (c = 0; c < PQnfields(queryres); c++)
rows[c] = strdup(PQgetvalue(queryres, count, c));
(m_data *) (md + count)->data = rows;
(m_data *) (md + count)->width = PQnfields(queryres);
(m_data *) (md + count)->next = (m_data *) (md + count + 1);
}
(m_data *) (md + count - 1)->next = NULL;
mr->queryData = mr->cursor = md;
mr->fieldCursor = mr->fieldData = mf;
return mr;
}
else
return NULL;
}
time_t msqlDateToUnixTime(char *a) {
time_t
msqlDateToUnixTime(char *a)
{
}
time_t msqlTimeToUnixTime(char *b) {
time_t
msqlTimeToUnixTime(char *b)
{
}
char *msql_tmpnam(void) {
return tmpnam("/tmp/msql.XXXXXX");
char *
msql_tmpnam(void)
{
return tmpnam("/tmp/msql.XXXXXX");
}
int msqlLoadConfigFile(char *a) {
int
msqlLoadConfigFile(char *a)
{
}

View File

@ -23,6 +23,7 @@ extern int assertTest(int val);
#ifdef ASSERT_CHECKING_TEST
extern int assertEnable(int val);
#endif
int
@ -68,14 +69,15 @@ assert_test(int val)
{
return assertTest(val);
}
#endif
/* end of file */
/*
* Local variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/

View File

@ -7,16 +7,18 @@ int unlisten(char *relname);
int max(int x, int y);
int min(int x, int y);
int assert_enable(int val);
#ifdef ASSERT_CHECKING_TEST
int assert_test(int val);
#endif
#endif
/*
* Local variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/

View File

@ -12,7 +12,7 @@ HeapTuple noup(void);
* noup () -- revoke permission on column
*
* Though it's called without args You have to specify referenced
* table/column while creating trigger:
* table/column while creating trigger:
* EXECUTE PROCEDURE noup ('col').
*/
@ -98,8 +98,8 @@ noup()
*/
if (!isnull)
{
elog(WARN, "%s: update not allowed", args[i] );
elog(WARN, "%s: update not allowed", args[i]);
SPI_finish();
return NULL;
}

View File

@ -1,5 +1,5 @@
#undef PORTNAME
#define PORTNAME OS2
#undef PORTNAME
#define PORTNAME OS2
/*-------------------------------------------------------------------------
*
* c.h--
@ -9,7 +9,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: c.h,v 1.1 1998/10/31 04:10:53 scrappy Exp $
* $Id: c.h,v 1.2 1999/05/25 16:06:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -65,7 +65,8 @@
#ifndef __cplusplus
#ifndef bool
typedef char bool;
#endif /* ndef bool */
#endif /* ndef bool */
#endif /* not C++ */
typedef bool *BoolPtr;

View File

@ -1,17 +1,17 @@
#ifndef TCPIPV4
# define TCPIPV4
#endif
#define TCPIPV4
#endif /* */
#ifndef MAXSOCKETS
#define MAXSOCKETS 2048
#endif
#endif /* */
/*
* DEF_PGPORT is the TCP port number on which the Postmaster listens by
* default. This can be overriden by command options, environment variables,
* and the postconfig hook. (set by build script)
*/
/*
* DEF_PGPORT is the TCP port number on which the Postmaster listens by
* default. This can be overriden by command options, environment variables,
* and the postconfig hook. (set by build script)
*/
#define DEF_PGPORT "5432"

View File

@ -206,28 +206,30 @@ on_error_continue()
** get_result
**
*/
PGresult *get_result()
PGresult *
get_result()
{
char *cmdstatus = PQcmdStatus(res);
char *cmdstatus = PQcmdStatus(res);
was_get_unset_result = TRUE;
/* we have to store the fetch location somewhere */
cmdstatus[0] = NUL;
memcpy(&cmdstatus[1],&tuple, sizeof(tuple));
memcpy(&cmdstatus[1], &tuple, sizeof(tuple));
return res;
}
/*
**
** set_result
**
*/
void set_result(PGresult *newres)
void
set_result(PGresult *newres)
{
char *cmdstatus = PQcmdStatus(res);
char *cmdstatus = PQcmdStatus(res);
if (newres == NULL)
halt("set_result called with null result pointer\n");
@ -256,9 +258,10 @@ void set_result(PGresult *newres)
** unset_result
**
*/
void unset_result(PGresult *oldres)
void
unset_result(PGresult *oldres)
{
char *cmdstatus = PQcmdStatus(oldres);
char *cmdstatus = PQcmdStatus(oldres);
if (oldres == NULL)
halt("unset_result called with null result pointer\n");
@ -277,8 +280,8 @@ void unset_result(PGresult *oldres)
** reset_fetch
**
*/
void reset_fetch()
void
reset_fetch()
{
tuple = 0;
}

View File

@ -11,8 +11,8 @@ int fetchwithnulls(void *param,...);
void on_error_continue();
void on_error_stop();
PGresult *get_result();
void set_result(PGresult *newres);
void unset_result(PGresult *oldres);
void reset_fetch();
void set_result(PGresult *newres);
void unset_result(PGresult *oldres);
void reset_fetch();
#define END_OF_TUPLES (-1)

View File

@ -7,7 +7,7 @@ a modification datetime stamp in a record when that record is UPDATEd.
Credits
This is 95%+ based on autoinc.c, which I used as a starting point as I do
not really know what I am doing. I also had help from
not really know what I am doing. I also had help from
Jan Wieck <jwieck@debis.com> who told me about the datetime_in("now") function.
OH, me, I'm Terry Mackintosh <terry@terrym.com>
*/
@ -17,17 +17,18 @@ OH, me, I'm Terry Mackintosh <terry@terrym.com>
HeapTuple moddatetime(void);
HeapTuple moddatetime()
HeapTuple
moddatetime()
{
Trigger *trigger; /* to get trigger name */
Trigger *trigger; /* to get trigger name */
int nargs; /* # of arguments */
int attnum; /* positional number of field to change */
Datum newdt; /* The current datetime. */
char **args; /* arguments */
char *relname; /* triggered relation name */
Relation rel; /* triggered relation */
Datum newdt; /* The current datetime. */
char **args; /* arguments */
char *relname; /* triggered relation name */
Relation rel; /* triggered relation */
HeapTuple rettuple = NULL;
TupleDesc tupdesc; /* tuple description */
TupleDesc tupdesc; /* tuple description */
if (!CurrentTriggerData)
elog(ERROR, "moddatetime: triggers are not initialized.");
@ -65,28 +66,31 @@ HeapTuple moddatetime()
/* Get the current datetime. */
newdt = datetime_in("now");
/* This gets the position in the turple of the field we want.
args[0] being the name of the field to update, as passed in
from the trigger.
*/
/*
* This gets the position in the turple of the field we want. args[0]
* being the name of the field to update, as passed in from the
* trigger.
*/
attnum = SPI_fnumber(tupdesc, args[0]);
/* This is were we check to see if the feild we are suppost to update even
exits. The above function must return -1 if name not found?
*/
/*
* This is were we check to see if the feild we are suppost to update
* even exits. The above function must return -1 if name not found?
*/
if (attnum < 0)
elog(ERROR, "moddatetime (%s): there is no attribute %s", relname,
args[0]);
/* OK, this is where we make sure the datetime field that we are
modifying is really a datetime field.
Hay, error checking, what a novel idea !-)
*/
if (SPI_gettypeid(tupdesc, attnum) != DATETIMEOID )
args[0]);
/*
* OK, this is where we make sure the datetime field that we are
* modifying is really a datetime field. Hay, error checking, what a
* novel idea !-)
*/
if (SPI_gettypeid(tupdesc, attnum) != DATETIMEOID)
elog(ERROR, "moddatetime (%s): attribute %s must be of DATETIME type",
relname, args[0]);
/* 1 is the number of items in the arrays attnum and newdt.
/* 1 is the number of items in the arrays attnum and newdt.
attnum is the positional number of the field to be updated.
newdt is the new datetime stamp.
NOTE that attnum and newdt are not arrays, but then a 1 ellement array

View File

@ -1,24 +1,27 @@
#include <stdio.h>
char *strtoupper(char *string)
char *
strtoupper(char *string)
{
int i ;
for (i=0;i<strlen(string);i++)
{
string[i]=toupper(string[i]);
}
return string;
int i;
for (i = 0; i < strlen(string); i++)
string[i] = toupper(string[i]);
return string;
}
void main ( char argc , char **argv )
void
main(char argc, char **argv)
{
char str[250];
int sw = 0 ;
while ( fgets (str,240,stdin) )
{
if ( sw == 0 ) printf("%s",strtoupper(str));
}
char str[250];
int sw = 0;
while (fgets(str, 240, stdin))
{
if (sw == 0)
printf("%s", strtoupper(str));
}
}

View File

@ -60,8 +60,8 @@ check_primary_key()
/*
* Some checks first...
*/
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_primary_key Enter Function");
#ifdef DEBUG_QUERY
elog(NOTICE, "Check_primary_key Enter Function");
#endif
/* Called by trigger manager ? */
if (!CurrentTriggerData)
@ -228,7 +228,7 @@ check_foreign_key()
Trigger *trigger; /* to get trigger name */
int nargs; /* # of args specified in CREATE TRIGGER */
char **args; /* arguments: as described above */
char **args_temp ;
char **args_temp;
int nrefs; /* number of references (== # of plans) */
char action; /* 'R'estrict | 'S'etnull | 'C'ascade */
int nkeys; /* # of key columns */
@ -244,13 +244,15 @@ check_foreign_key()
bool isequal = true; /* are keys in both tuples equal (in
* UPDATE) */
char ident[2 * NAMEDATALEN]; /* to identify myself */
int is_update=0;
int is_update = 0;
int ret;
int i,
r;
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_foreign_key Enter Function");
#ifdef DEBUG_QUERY
elog(NOTICE, "Check_foreign_key Enter Function");
#endif
/*
* Some checks first...
*/
@ -275,12 +277,12 @@ check_foreign_key()
* key in tg_newtuple is the same as in tg_trigtuple then nothing to
* do.
*/
is_update=0;
is_update = 0;
if (TRIGGER_FIRED_BY_UPDATE(CurrentTriggerData->tg_event))
{
{
newtuple = CurrentTriggerData->tg_newtuple;
is_update=1;
}
is_update = 1;
}
trigger = CurrentTriggerData->tg_trigger;
nargs = trigger->tgnargs;
args = trigger->tgargs;
@ -288,7 +290,7 @@ check_foreign_key()
if (nargs < 5) /* nrefs, action, key, Relation, key - at
* least */
elog(ERROR, "check_foreign_key: too short %d (< 5) list of arguments", nargs);
nrefs = pg_atoi(args[0], sizeof(int), 0);
if (nrefs < 1)
elog(ERROR, "check_foreign_key: %d (< 1) number of references specified", nrefs);
@ -386,7 +388,7 @@ check_foreign_key()
if (plan->nplans <= 0) /* Get typeId of column */
argtypes[i] = SPI_gettypeid(tupdesc, fnumber);
}
args_temp = args;
args_temp = args;
nargs -= nkeys;
args += nkeys;
@ -397,13 +399,14 @@ check_foreign_key()
{
void *pplan;
char sql[8192];
char **args2 = args ;
char **args2 = args;
plan->splan = (void **) malloc(nrefs * sizeof(void *));
for (r = 0; r < nrefs; r++)
{
relname = args2[0];
/*
* For 'R'estrict action we construct SELECT query - SELECT 1
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
@ -417,50 +420,59 @@ check_foreign_key()
* For 'C'ascade action we construct DELETE query - DELETE
* FROM _referencing_relation_ WHERE Fkey1 = $1 [AND Fkey2 =
* $2 [...]] - to delete all referencing tuples.
*/
/*Max : Cascade with UPDATE query i create update query that
updates new key values in referenced tables
*/
else if (action == 'c'){
if (is_update == 1)
{
int fn;
char *nv;
int k ;
sprintf(sql, "update %s set ", relname);
for (k = 1; k <= nkeys; k++)
*/
/*
* Max : Cascade with UPDATE query i create update query that
* updates new key values in referenced tables
*/
else if (action == 'c')
{
if (is_update == 1)
{
int is_char_type =0;
char *type;
fn = SPI_fnumber(tupdesc, args_temp[k-1]);
nv = SPI_getvalue(newtuple, tupdesc, fn);
type=SPI_gettype(tupdesc,fn);
if ( (strcmp(type,"text") && strcmp (type,"varchar") &&
strcmp(type,"char") && strcmp (type,"bpchar") &&
strcmp(type,"date") && strcmp (type,"datetime")) == 0 )
is_char_type=1;
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_foreign_key Debug value %s type %s %d",
nv,type,is_char_type);
int fn;
char *nv;
int k;
sprintf(sql, "update %s set ", relname);
for (k = 1; k <= nkeys; k++)
{
int is_char_type = 0;
char *type;
fn = SPI_fnumber(tupdesc, args_temp[k - 1]);
nv = SPI_getvalue(newtuple, tupdesc, fn);
type = SPI_gettype(tupdesc, fn);
if ((strcmp(type, "text") && strcmp(type, "varchar") &&
strcmp(type, "char") && strcmp(type, "bpchar") &&
strcmp(type, "date") && strcmp(type, "datetime")) == 0)
is_char_type = 1;
#ifdef DEBUG_QUERY
elog(NOTICE, "Check_foreign_key Debug value %s type %s %d",
nv, type, is_char_type);
#endif
/* is_char_type =1 i set ' ' for define a new value
*/
sprintf(sql + strlen(sql), " %s = %s%s%s %s ",
args2[k], (is_char_type>0) ? "'" :"" ,
nv, (is_char_type >0) ? "'" :"",(k < nkeys) ? ", " : "");
is_char_type=0;
/*
* is_char_type =1 i set ' ' for define a new
* value
*/
sprintf(sql + strlen(sql), " %s = %s%s%s %s ",
args2[k], (is_char_type > 0) ? "'" : "",
nv, (is_char_type > 0) ? "'" : "", (k < nkeys) ? ", " : "");
is_char_type = 0;
}
strcat(sql, " where ");
}
strcat(sql, " where ");
}
else /* DELETE */
sprintf(sql, "delete from %s where ", relname);
}
else
/* DELETE */
sprintf(sql, "delete from %s where ", relname);
}
/*
* For 'S'etnull action we construct UPDATE query - UPDATE
* _referencing_relation_ SET Fkey1 null [, Fkey2 null [...]]
@ -500,15 +512,15 @@ check_foreign_key()
elog(ERROR, "check_foreign_key: SPI_saveplan returned %d", SPI_result);
plan->splan[r] = pplan;
args2 += nkeys + 1; /* to the next relation */
}
plan->nplans = nrefs;
#ifdef DEBUG_QUERY
elog(NOTICE,"Check_foreign_key Debug Query is : %s ", sql);
#ifdef DEBUG_QUERY
elog(NOTICE, "Check_foreign_key Debug Query is : %s ", sql);
#endif
}
/*
* If UPDATE and key is not changed ...
*/

View File

@ -352,14 +352,15 @@ c_charin(char *str)
{
return (string_input(str, 1, 0, NULL));
}
#endif
/* end of file */
/*
* Local variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/

View File

@ -14,14 +14,15 @@ char *c_varcharout(char *s);
#if 0
struct varlena *c_textin(char *str);
char *c_char16in(char *str);
#endif
#endif
/*
* Local variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/

View File

@ -29,7 +29,7 @@ user_lock(unsigned int id1, unsigned int id2, LOCKMODE lockmode)
LOCKTAG tag;
memset(&tag, 0, sizeof(LOCKTAG));
tag.dbId = MyDatabaseId;
tag.dbId = MyDatabaseId;
tag.relId = 0;
tag.tupleId.ip_blkid.bi_hi = id2 >> 16;
tag.tupleId.ip_blkid.bi_lo = id2 & 0xffff;
@ -44,7 +44,7 @@ user_unlock(unsigned int id1, unsigned int id2, LOCKMODE lockmode)
LOCKTAG tag;
memset(&tag, 0, sizeof(LOCKTAG));
tag.dbId = MyDatabaseId;
tag.dbId = MyDatabaseId;
tag.relId = 0;
tag.tupleId.ip_blkid.bi_hi = id2 >> 16;
tag.tupleId.ip_blkid.bi_lo = id2 & 0xffff;
@ -99,8 +99,8 @@ user_unlock_all()
/*
* Local variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/

View File

@ -13,8 +13,8 @@ int user_unlock_all(void);
/*
* Local variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.1 1999/04/10 16:48:05 peter Exp $
* $Header: /cvsroot/pgsql/contrib/vacuumlo/vacuumlo.c,v 1.2 1999/05/25 16:06:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -25,177 +25,187 @@
#define BUFSIZE 1024
int vacuumlo(char *,int);
int vacuumlo(char *, int);
/*
* This vacuums a database. It returns 1 on success, -1 on failure.
*/
int vacuumlo(char *database,int verbose)
int
vacuumlo(char *database, int verbose)
{
PGconn *conn;
PGresult *res, *res2;
char buf[BUFSIZE];
int matched=0; /* Number matched per scan */
int i;
conn = PQsetdb(NULL, NULL, NULL, NULL, database);
/* check to see that the backend connection was successfully made */
if (PQstatus(conn) == CONNECTION_BAD)
{
fprintf(stderr, "Connection to database '%s' failed.\n", database);
fprintf(stderr, "%s", PQerrorMessage(conn));
return -1;
}
if(verbose)
fprintf(stdout,"Connected to %s\n",database);
/*
* First we create and populate the lo temp table
*/
buf[0]='\0';
strcat(buf,"SELECT oid AS lo ");
strcat(buf,"INTO TEMP TABLE vacuum_l ");
strcat(buf,"FROM pg_class ");
strcat(buf,"WHERE relkind='l'");
if(!(res = PQexec(conn,buf))) {
fprintf(stderr,"Failed to create temp table.\n");
PQfinish(conn);
return -1;
}
PQclear(res);
/*
* Now find any candidate tables who have columns of type oid (the column
* oid is ignored, as it has attnum < 1)
*/
buf[0]='\0';
strcat(buf,"SELECT c.relname, a.attname ");
strcat(buf,"FROM pg_class c, pg_attribute a, pg_type t ");
strcat(buf,"WHERE a.attnum > 0 ");
strcat(buf," AND a.attrelid = c.oid ");
strcat(buf," AND a.atttypid = t.oid ");
strcat(buf," AND t.typname = 'oid' ");
strcat(buf," AND c.relname NOT LIKE 'pg_%'");
if(!(res = PQexec(conn,buf))) {
fprintf(stderr,"Failed to create temp table.\n");
PQfinish(conn);
return -1;
}
for(i=0;i<PQntuples(res);i++)
{
char *table,*field;
table = PQgetvalue(res,i,0);
field = PQgetvalue(res,i,1);
if(verbose) {
fprintf(stdout,"Checking %s in %s: ",field,table);
fflush(stdout);
}
res2 = PQexec(conn, "begin");
PQclear(res2);
buf[0] = '\0';
strcat(buf,"DELETE FROM vacuum_l ");
strcat(buf,"WHERE lo IN (");
strcat(buf,"SELECT ");
strcat(buf,field);
strcat(buf," FROM ");
strcat(buf,table);
strcat(buf,");");
if(!(res2 = PQexec(conn,buf))) {
fprintf(stderr,"Failed to check %s in table %s\n",field,table);
PQclear(res);
PQfinish(conn);
return -1;
}
if(PQresultStatus(res2)!=PGRES_COMMAND_OK) {
fprintf(stderr,
"Failed to check %s in table %s\n%s\n",
field,table,
PQerrorMessage(conn)
);
PQclear(res2);
PQclear(res);
PQfinish(conn);
return -1;
}
PQclear(res2);
PGconn *conn;
PGresult *res,
*res2;
char buf[BUFSIZE];
int matched = 0; /* Number matched per scan */
int i;
res2 = PQexec(conn, "end");
PQclear(res2);
}
PQclear(res);
/* Start the transaction */
res = PQexec(conn, "begin");
PQclear(res);
/*
* Finally, those entries remaining in vacuum_l are orphans.
*/
buf[0]='\0';
strcat(buf,"SELECT lo ");
strcat(buf,"FROM vacuum_l");
if(!(res = PQexec(conn,buf))) {
fprintf(stderr,"Failed to read temp table.\n");
PQfinish(conn);
return -1;
}
matched=PQntuples(res);
for(i=0;i<matched;i++)
conn = PQsetdb(NULL, NULL, NULL, NULL, database);
/* check to see that the backend connection was successfully made */
if (PQstatus(conn) == CONNECTION_BAD)
{
Oid lo = (Oid) atoi(PQgetvalue(res,i,0));
if(verbose) {
fprintf(stdout,"\rRemoving lo %6d \n",lo);
fflush(stdout);
}
if(lo_unlink(conn,lo)<0) {
fprintf(stderr,"Failed to remove lo %d\n",lo);
}
fprintf(stderr, "Connection to database '%s' failed.\n", database);
fprintf(stderr, "%s", PQerrorMessage(conn));
return -1;
}
PQclear(res);
/*
* That's all folks!
*/
res = PQexec(conn, "end");
PQclear(res);
PQfinish(conn);
if(verbose)
fprintf(stdout,"\rRemoved %d large objects from %s.\n",matched,database);
return 0;
if (verbose)
fprintf(stdout, "Connected to %s\n", database);
/*
* First we create and populate the lo temp table
*/
buf[0] = '\0';
strcat(buf, "SELECT oid AS lo ");
strcat(buf, "INTO TEMP TABLE vacuum_l ");
strcat(buf, "FROM pg_class ");
strcat(buf, "WHERE relkind='l'");
if (!(res = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to create temp table.\n");
PQfinish(conn);
return -1;
}
PQclear(res);
/*
* Now find any candidate tables who have columns of type oid (the
* column oid is ignored, as it has attnum < 1)
*/
buf[0] = '\0';
strcat(buf, "SELECT c.relname, a.attname ");
strcat(buf, "FROM pg_class c, pg_attribute a, pg_type t ");
strcat(buf, "WHERE a.attnum > 0 ");
strcat(buf, " AND a.attrelid = c.oid ");
strcat(buf, " AND a.atttypid = t.oid ");
strcat(buf, " AND t.typname = 'oid' ");
strcat(buf, " AND c.relname NOT LIKE 'pg_%'");
if (!(res = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to create temp table.\n");
PQfinish(conn);
return -1;
}
for (i = 0; i < PQntuples(res); i++)
{
char *table,
*field;
table = PQgetvalue(res, i, 0);
field = PQgetvalue(res, i, 1);
if (verbose)
{
fprintf(stdout, "Checking %s in %s: ", field, table);
fflush(stdout);
}
res2 = PQexec(conn, "begin");
PQclear(res2);
buf[0] = '\0';
strcat(buf, "DELETE FROM vacuum_l ");
strcat(buf, "WHERE lo IN (");
strcat(buf, "SELECT ");
strcat(buf, field);
strcat(buf, " FROM ");
strcat(buf, table);
strcat(buf, ");");
if (!(res2 = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to check %s in table %s\n", field, table);
PQclear(res);
PQfinish(conn);
return -1;
}
if (PQresultStatus(res2) != PGRES_COMMAND_OK)
{
fprintf(stderr,
"Failed to check %s in table %s\n%s\n",
field, table,
PQerrorMessage(conn)
);
PQclear(res2);
PQclear(res);
PQfinish(conn);
return -1;
}
PQclear(res2);
res2 = PQexec(conn, "end");
PQclear(res2);
}
PQclear(res);
/* Start the transaction */
res = PQexec(conn, "begin");
PQclear(res);
/*
* Finally, those entries remaining in vacuum_l are orphans.
*/
buf[0] = '\0';
strcat(buf, "SELECT lo ");
strcat(buf, "FROM vacuum_l");
if (!(res = PQexec(conn, buf)))
{
fprintf(stderr, "Failed to read temp table.\n");
PQfinish(conn);
return -1;
}
matched = PQntuples(res);
for (i = 0; i < matched; i++)
{
Oid lo = (Oid) atoi(PQgetvalue(res, i, 0));
if (verbose)
{
fprintf(stdout, "\rRemoving lo %6d \n", lo);
fflush(stdout);
}
if (lo_unlink(conn, lo) < 0)
fprintf(stderr, "Failed to remove lo %d\n", lo);
}
PQclear(res);
/*
* That's all folks!
*/
res = PQexec(conn, "end");
PQclear(res);
PQfinish(conn);
if (verbose)
fprintf(stdout, "\rRemoved %d large objects from %s.\n", matched, database);
return 0;
}
int
main(int argc, char **argv)
{
int verbose = 0;
int arg;
int rc=0;
if (argc < 2)
int verbose = 0;
int arg;
int rc = 0;
if (argc < 2)
{
fprintf(stderr, "Usage: %s [-v] database_name [db2 ... dbn]\n",
argv[0]);
exit(1);
fprintf(stderr, "Usage: %s [-v] database_name [db2 ... dbn]\n",
argv[0]);
exit(1);
}
for(arg=1;arg<argc;arg++) {
if(strcmp("-v",argv[arg])==0)
verbose=!verbose;
else
rc += vacuumlo(argv[arg],verbose);
}
return rc;
for (arg = 1; arg < argc; arg++)
{
if (strcmp("-v", argv[arg]) == 0)
verbose = !verbose;
else
rc += vacuumlo(argv[arg], verbose);
}
return rc;
}
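Everything in the vacuumlo diff above follows one libpq idiom: connect, issue a query with PQexec, test the result status, clear the result, and close the connection. The standalone sketch below is not part of this commit; it only restates that idiom, and the database name and query are placeholder values.

#include <stdio.h>
#include "libpq-fe.h"

int
main()
{
    PGconn     *conn;
    PGresult   *res;

    /* connect using libpq defaults; "template1" is only an example */
    conn = PQsetdb(NULL, NULL, NULL, NULL, "template1");
    if (PQstatus(conn) == CONNECTION_BAD)
    {
        fprintf(stderr, "Connection failed.\n%s", PQerrorMessage(conn));
        return -1;
    }

    /* every PQexec result must be checked and then cleared */
    res = PQexec(conn, "SELECT 1");
    if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "Query failed.\n%s", PQerrorMessage(conn));
        if (res != NULL)
            PQclear(res);
        PQfinish(conn);
        return -1;
    }
    printf("%d row(s) returned\n", PQntuples(res));
    PQclear(res);
    PQfinish(conn);
    return 0;
}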
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.50 1999/03/14 20:17:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.51 1999/05/25 16:06:35 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@ -124,7 +124,7 @@ DataFill(char *data,
*bitP |= bitmask;
}
data = (char *)att_align((long)data, att[i]->attlen, att[i]->attalign);
data = (char *) att_align((long) data, att[i]->attlen, att[i]->attalign);
switch (att[i]->attlen)
{
case -1:
@ -151,7 +151,7 @@ DataFill(char *data,
att[i]->attlen);
break;
}
data = (char *)att_addlength((long)data, att[i]->attlen, value[i]);
data = (char *) att_addlength((long) data, att[i]->attlen, value[i]);
}
}
@ -210,7 +210,7 @@ heap_attisnull(HeapTuple tup, int attnum)
int
heap_sysattrlen(AttrNumber attno)
{
HeapTupleHeader f = NULL;
HeapTupleHeader f = NULL;
switch (attno)
{
@ -301,6 +301,7 @@ heap_getsysattr(HeapTuple tup, Buffer b, int attnum)
}
return (Datum) NULL;
}
#endif
/* ----------------
@ -328,11 +329,11 @@ nocachegetattr(HeapTuple tuple,
TupleDesc tupleDesc,
bool *isnull)
{
char *tp; /* ptr to att in tuple */
HeapTupleHeader tup = tuple->t_data;
bits8 *bp = tup->t_bits; /* ptr to att in tuple */
Form_pg_attribute *att = tupleDesc->attrs;
int slow = 0; /* do we have to walk nulls? */
char *tp; /* ptr to att in tuple */
HeapTupleHeader tup = tuple->t_data;
bits8 *bp = tup->t_bits; /* ptr to att in tuple */
Form_pg_attribute *att = tupleDesc->attrs;
int slow = 0; /* do we have to walk nulls? */
#if IN_MACRO
@ -376,6 +377,7 @@ nocachegetattr(HeapTuple tuple,
}
else
{
/*
* there's a null somewhere in the tuple
*/
@ -404,12 +406,13 @@ nocachegetattr(HeapTuple tuple,
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
if ((~ bp[byte]) & ((1 << finalbit) - 1))
if ((~bp[byte]) & ((1 << finalbit) - 1))
slow = 1;
else
{
/* check for nulls in any "earlier" bytes */
int i;
int i;
for (i = 0; i < byte; i++)
{
if (bp[i] != 0xFF)
@ -439,6 +442,7 @@ nocachegetattr(HeapTuple tuple,
else if (!HeapTupleAllFixed(tuple))
{
int j;
/*
* In for(), we make this <= and not < because we want to test
* if we can go past it in initializing offsets.
@ -456,9 +460,9 @@ nocachegetattr(HeapTuple tuple,
/*
* If slow is zero, and we got here, we know that we have a tuple with
* no nulls or varlenas before the target attribute.
* If possible, we also want to initialize the remainder of the
* attribute cached offset values.
* no nulls or varlenas before the target attribute. If possible, we
* also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
{
@ -570,7 +574,7 @@ heap_copytuple(HeapTuple tuple)
newTuple->t_len = tuple->t_len;
newTuple->t_self = tuple->t_self;
newTuple->t_data = (HeapTupleHeader) ((char *) newTuple + HEAPTUPLESIZE);
memmove((char *) newTuple->t_data,
memmove((char *) newTuple->t_data,
(char *) tuple->t_data, (int) tuple->t_len);
return newTuple;
}
@ -589,11 +593,11 @@ heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
dest->t_data = NULL;
return;
}
dest->t_len = src->t_len;
dest->t_self = src->t_self;
dest->t_data = (HeapTupleHeader) palloc(src->t_len);
memmove((char *) dest->t_data,
memmove((char *) dest->t_data,
(char *) src->t_data, (int) src->t_len);
return;
}
@ -657,14 +661,14 @@ heap_formtuple(TupleDesc tupleDescriptor,
Datum *value,
char *nulls)
{
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
int bitmaplen;
long len;
int hoff;
bool hasnull = false;
int i;
int numberOfAttributes = tupleDescriptor->natts;
HeapTuple tuple; /* return tuple */
HeapTupleHeader td; /* tuple data */
int bitmaplen;
long len;
int hoff;
bool hasnull = false;
int i;
int numberOfAttributes = tupleDescriptor->natts;
len = offsetof(HeapTupleHeaderData, t_bits);
@ -760,9 +764,9 @@ heap_modifytuple(HeapTuple tuple,
if (repl[attoff] == ' ')
{
value[attoff] = heap_getattr(tuple,
AttrOffsetGetAttrNumber(attoff),
RelationGetDescr(relation),
&isNull);
AttrOffsetGetAttrNumber(attoff),
RelationGetDescr(relation),
&isNull);
nulls[attoff] = (isNull) ? 'n' : ' ';
}
@ -790,12 +794,12 @@ heap_modifytuple(HeapTuple tuple,
infomask = newTuple->t_data->t_infomask;
memmove((char *) &newTuple->t_data->t_oid, /* XXX */
(char *) &tuple->t_data->t_oid,
((char *) &tuple->t_data->t_hoff -
(char *) &tuple->t_data->t_oid)); /* XXX */
((char *) &tuple->t_data->t_hoff -
(char *) &tuple->t_data->t_oid)); /* XXX */
newTuple->t_data->t_infomask = infomask;
newTuple->t_data->t_natts = numberOfAttributes;
newTuple->t_self = tuple->t_self;
return newTuple;
}
@ -809,10 +813,10 @@ heap_addheader(uint32 natts, /* max domain index */
int structlen, /* its length */
char *structure) /* pointer to the struct */
{
HeapTuple tuple;
HeapTupleHeader td; /* tuple data */
long len;
int hoff;
HeapTuple tuple;
HeapTupleHeader td; /* tuple data */
long len;
int hoff;
AssertArg(natts > 0);
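The nocachegetattr hunk above re-wraps the null-bitmap test ((~bp[byte]) & ((1 << finalbit) - 1)) that decides whether any attribute before the target is null. As a self-contained illustration only (this is not backend code, and the bitmap below is made up), the same bit arithmetic can be exercised like this:

#include <stdio.h>

typedef unsigned char bits8;

/* Return 1 if any of the first 'attnum' attributes is null.
 * A set bit means "not null", matching the heap tuple convention. */
static int
any_null_before(const bits8 *bp, int attnum)
{
    int         byte = attnum >> 3;
    int         finalbit = attnum & 0x07;
    int         i;

    /* nulls "before" the final bit of the last byte */
    if ((~bp[byte]) & ((1 << finalbit) - 1))
        return 1;
    /* nulls in any earlier (full) byte */
    for (i = 0; i < byte; i++)
        if (bp[i] != 0xFF)
            return 1;
    return 0;
}

int
main()
{
    bits8       bitmap[2] = {0xFF, 0xFD};   /* attribute 9 (0-based) is null */

    /* prints "0 1": no null among attributes 0..8, one among 0..11 */
    printf("%d %d\n", any_null_before(bitmap, 9), any_null_before(bitmap, 12));
    return 0;
}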
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.45 1999/05/10 00:44:50 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.46 1999/05/25 16:06:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -25,9 +25,9 @@
#include "libpq/pqformat.h"
#include "utils/syscache.h"
static void printtup_setup(DestReceiver* self, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self);
static void printtup_cleanup(DestReceiver* self);
static void printtup_setup(DestReceiver * self, TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self);
static void printtup_cleanup(DestReceiver * self);
/* ----------------------------------------------------------------
* printtup / debugtup support
@ -43,7 +43,7 @@ static void printtup_cleanup(DestReceiver* self);
* ----------------
*/
int
getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
getTypeOutAndElem(Oid type, Oid *typOutput, Oid *typElem)
{
HeapTuple typeTuple;
@ -54,6 +54,7 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
if (HeapTupleIsValid(typeTuple))
{
Form_pg_type pt = (Form_pg_type) GETSTRUCT(typeTuple);
*typOutput = (Oid) pt->typoutput;
*typElem = (Oid) pt->typelem;
return OidIsValid(*typOutput);
@ -70,27 +71,29 @@ getTypeOutAndElem(Oid type, Oid* typOutput, Oid* typElem)
* Private state for a printtup destination object
* ----------------
*/
typedef struct { /* Per-attribute information */
typedef struct
{ /* Per-attribute information */
Oid typoutput; /* Oid for the attribute's type output fn */
Oid typelem; /* typelem value to pass to the output fn */
FmgrInfo finfo; /* Precomputed call info for typoutput */
} PrinttupAttrInfo;
} PrinttupAttrInfo;
typedef struct {
DestReceiver pub; /* publicly-known function pointers */
TupleDesc attrinfo; /* The attr info we are set up for */
int nattrs;
PrinttupAttrInfo *myinfo; /* Cached info about each attr */
} DR_printtup;
typedef struct
{
DestReceiver pub; /* publicly-known function pointers */
TupleDesc attrinfo; /* The attr info we are set up for */
int nattrs;
PrinttupAttrInfo *myinfo; /* Cached info about each attr */
} DR_printtup;
/* ----------------
* Initialize: create a DestReceiver for printtup
* ----------------
*/
DestReceiver*
DestReceiver *
printtup_create_DR()
{
DR_printtup* self = (DR_printtup*) palloc(sizeof(DR_printtup));
DR_printtup *self = (DR_printtup *) palloc(sizeof(DR_printtup));
self->pub.receiveTuple = printtup;
self->pub.setup = printtup_setup;
@ -100,42 +103,43 @@ printtup_create_DR()
self->nattrs = 0;
self->myinfo = NULL;
return (DestReceiver*) self;
return (DestReceiver *) self;
}
static void
printtup_setup(DestReceiver* self, TupleDesc typeinfo)
printtup_setup(DestReceiver * self, TupleDesc typeinfo)
{
/* ----------------
* We could set up the derived attr info at this time, but we postpone it
* until the first call of printtup, for 3 reasons:
* 1. We don't waste time (compared to the old way) if there are no
* tuples at all to output.
* tuples at all to output.
* 2. Checking in printtup allows us to handle the case that the tuples
* change type midway through (although this probably can't happen in
* the current executor).
* change type midway through (although this probably can't happen in
* the current executor).
* 3. Right now, ExecutorRun passes a NULL for typeinfo anyway :-(
* ----------------
*/
}
static void
printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
printtup_prepare_info(DR_printtup * myState, TupleDesc typeinfo, int numAttrs)
{
int i;
int i;
if (myState->myinfo)
pfree(myState->myinfo); /* get rid of any old data */
pfree(myState->myinfo); /* get rid of any old data */
myState->myinfo = NULL;
myState->attrinfo = typeinfo;
myState->nattrs = numAttrs;
if (numAttrs <= 0)
return;
myState->myinfo = (PrinttupAttrInfo*)
myState->myinfo = (PrinttupAttrInfo *)
palloc(numAttrs * sizeof(PrinttupAttrInfo));
for (i = 0; i < numAttrs; i++)
{
PrinttupAttrInfo* thisState = myState->myinfo + i;
PrinttupAttrInfo *thisState = myState->myinfo + i;
if (getTypeOutAndElem((Oid) typeinfo->attrs[i]->atttypid,
&thisState->typoutput, &thisState->typelem))
fmgr_info(thisState->typoutput, &thisState->finfo);
@ -147,9 +151,9 @@ printtup_prepare_info(DR_printtup* myState, TupleDesc typeinfo, int numAttrs)
* ----------------
*/
static void
printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
DR_printtup *myState = (DR_printtup*) self;
DR_printtup *myState = (DR_printtup *) self;
StringInfoData buf;
int i,
j,
@ -178,7 +182,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
k = 1 << 7;
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
if (! heap_attisnull(tuple, i + 1))
if (!heap_attisnull(tuple, i + 1))
j |= k; /* set bit if not null */
k >>= 1;
if (k == 0) /* end of byte? */
@ -197,7 +201,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
*/
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
PrinttupAttrInfo* thisState = myState->myinfo + i;
PrinttupAttrInfo *thisState = myState->myinfo + i;
attr = heap_getattr(tuple, i + 1, typeinfo, &isnull);
if (isnull)
continue;
@ -223,9 +228,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
* ----------------
*/
static void
printtup_cleanup(DestReceiver* self)
printtup_cleanup(DestReceiver * self)
{
DR_printtup* myState = (DR_printtup*) self;
DR_printtup *myState = (DR_printtup *) self;
if (myState->myinfo)
pfree(myState->myinfo);
pfree(myState);
@ -274,7 +280,7 @@ showatts(char *name, TupleDesc tupleDesc)
* ----------------
*/
void
debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
int i;
Datum attr;
@ -310,7 +316,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
* ----------------
*/
void
printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver * self)
{
StringInfoData buf;
int i,
@ -334,7 +340,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver* self)
k = 1 << 7;
for (i = 0; i < tuple->t_data->t_natts; ++i)
{
if (! heap_attisnull(tuple, i + 1))
if (!heap_attisnull(tuple, i + 1))
j |= k; /* set bit if not null */
k >>= 1;
if (k == 0) /* end of byte? */
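The printtup changes above are mostly about the DestReceiver convention: a small struct of function pointers (setup, receiveTuple, cleanup) with the receiver's private state allocated behind it, as DR_printtup does. The sketch below is only an analogy with invented names (MyReceiver, CountingReceiver); it is not the backend's DestReceiver API, but it shows why casting the public pointer back to the larger struct works: the public part is the first member.

#include <stdio.h>
#include <stdlib.h>

/* a stripped-down receiver interface, analogous in shape to DestReceiver */
typedef struct MyReceiver
{
    void        (*setup) (struct MyReceiver *self);
    void        (*receive) (struct MyReceiver *self, const char *row);
    void        (*cleanup) (struct MyReceiver *self);
} MyReceiver;

/* "subclass": public part first, private state after it */
typedef struct
{
    MyReceiver  pub;
    int         nrows;          /* cached per-receiver state */
} CountingReceiver;

static void
counting_setup(MyReceiver *self)
{
    ((CountingReceiver *) self)->nrows = 0;
}

static void
counting_receive(MyReceiver *self, const char *row)
{
    ((CountingReceiver *) self)->nrows++;
    printf("row: %s\n", row);
}

static void
counting_cleanup(MyReceiver *self)
{
    printf("%d row(s) seen\n", ((CountingReceiver *) self)->nrows);
    free(self);
}

MyReceiver *
counting_create(void)
{
    CountingReceiver *self = malloc(sizeof(CountingReceiver));

    self->pub.setup = counting_setup;
    self->pub.receive = counting_receive;
    self->pub.cleanup = counting_cleanup;
    return (MyReceiver *) self;
}

int
main()
{
    MyReceiver *r = counting_create();

    r->setup(r);
    r->receive(r, "hello");
    r->cleanup(r);
    return 0;
}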
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.13 1999/02/13 23:14:13 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.14 1999/05/25 16:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -18,7 +18,7 @@
#include <access/skey.h>
/*
* ScanKeyEntryIsLegal
* ScanKeyEntryIsLegal
* True iff the scan key entry is legal.
*/
#define ScanKeyEntryIsLegal(entry) \
@ -28,7 +28,7 @@
)
/*
* ScanKeyEntrySetIllegal
* ScanKeyEntrySetIllegal
* Marks a scan key entry as illegal.
*/
void
@ -43,7 +43,7 @@ ScanKeyEntrySetIllegal(ScanKey entry)
}
/*
* ScanKeyEntryInitialize
* ScanKeyEntryInitialize
* Initializes an scan key entry.
*
* Note:
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.48 1999/02/13 23:14:14 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.49 1999/05/25 16:06:42 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@ -487,7 +487,7 @@ BuildDescForRelation(List *schema, char *relname)
{
/* array of XXX is _XXX */
snprintf(typename, NAMEDATALEN,
"_%.*s", NAMEDATALEN - 2, entry->typename->name);
"_%.*s", NAMEDATALEN - 2, entry->typename->name);
attdim = length(arry);
}
else
View File
@ -344,7 +344,7 @@ gistinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
/*
* Notes in ExecUtils:ExecOpenIndices()
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
res = gistdoinsert(r, itup, &giststate);
@ -1106,10 +1106,10 @@ gistdelete(Relation r, ItemPointer tid)
Page page;
/*
* Notes in ExecUtils:ExecOpenIndices()
* Also note that only vacuum deletes index tuples now...
* Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
* deletes index tuples now...
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
blkno = ItemPointerGetBlockNumber(tid);
View File
@ -68,7 +68,7 @@ gistbeginscan(Relation r,
/*
* Let index_beginscan do its work...
*
RelationSetLockForRead(r);
* RelationSetLockForRead(r);
*/
s = RelationGetIndexScan(r, fromEnd, nkeys, key);
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.25 1999/02/13 23:14:17 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.26 1999/05/25 16:06:54 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -485,9 +485,9 @@ hashrestrpos(IndexScanDesc scan)
/* bump lock on currentMarkData and copy to currentItemData */
if (ItemPointerIsValid(&(scan->currentMarkData)))
{
so->hashso_curbuf =_hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
HASH_READ);
so->hashso_curbuf = _hash_getbuf(scan->relation,
BufferGetBlockNumber(so->hashso_mrkbuf),
HASH_READ);
scan->currentItemData = scan->currentMarkData;
}
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.16 1999/03/14 16:27:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.17 1999/05/25 16:06:56 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -34,9 +34,9 @@ hashint4(uint32 key)
}
uint32
hashint8(int64 *key)
hashint8(int64 * key)
{
return ~((uint32)*key);
return ~((uint32) *key);
}
/* Hash function from Chris Torek. */
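hashint8 above hashes a 64-bit key by truncating it to 32 bits and complementing the result. A trivial standalone check of that expression (with local typedefs standing in for the backend's int64/uint32):

#include <stdio.h>

typedef long long int64;
typedef unsigned int uint32;

static uint32
hash_int8(const int64 *key)
{
    /* same expression as the backend's hashint8: truncate, then invert */
    return ~((uint32) *key);
}

int
main()
{
    int64       k = 42;

    printf("0x%08x\n", hash_int8(&k));  /* prints 0xffffffd5 */
    return 0;
}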
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.19 1999/02/13 23:14:20 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.20 1999/05/25 16:06:58 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@ -321,7 +321,7 @@ _hash_setpagelock(Relation rel,
{
switch (access)
{
case HASH_WRITE:
case HASH_WRITE:
LockPage(rel, blkno, ExclusiveLock);
break;
case HASH_READ:
@ -345,7 +345,7 @@ _hash_unsetpagelock(Relation rel,
{
switch (access)
{
case HASH_WRITE:
case HASH_WRITE:
UnlockPage(rel, blkno, ExclusiveLock);
break;
case HASH_READ:
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.42 1999/03/28 20:31:56 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.43 1999/05/25 16:07:04 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -117,7 +117,7 @@ initscan(HeapScanDesc scan,
* relation is empty
* ----------------
*/
scan->rs_ntup.t_data = scan->rs_ctup.t_data =
scan->rs_ntup.t_data = scan->rs_ctup.t_data =
scan->rs_ptup.t_data = NULL;
scan->rs_nbuf = scan->rs_cbuf = scan->rs_pbuf = InvalidBuffer;
}
@ -216,15 +216,15 @@ heapgettup(Relation relation,
int nkeys,
ScanKey key)
{
ItemId lpp;
Page dp;
int page;
int pages;
int lines;
OffsetNumber lineoff;
int linesleft;
ItemPointer tid = (tuple->t_data == NULL) ?
(ItemPointer) NULL : &(tuple->t_self);
ItemId lpp;
Page dp;
int page;
int pages;
int lines;
OffsetNumber lineoff;
int linesleft;
ItemPointer tid = (tuple->t_data == NULL) ?
(ItemPointer) NULL : &(tuple->t_self);
/* ----------------
* increment access statistics
@ -290,8 +290,8 @@ heapgettup(Relation relation,
return;
}
*buffer = RelationGetBufferWithBuffer(relation,
ItemPointerGetBlockNumber(tid),
*buffer);
ItemPointerGetBlockNumber(tid),
*buffer);
if (!BufferIsValid(*buffer))
elog(ERROR, "heapgettup: failed ReadBuffer");
@ -439,7 +439,8 @@ heapgettup(Relation relation,
}
else
{
++lpp; /* move forward in this page's ItemId array */
++lpp; /* move forward in this page's ItemId
* array */
++lineoff;
}
}
@ -816,6 +817,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
}
else
{ /* NONTUP */
/*
* Don't release scan->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
@ -897,6 +899,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
}
else
{ /* NONTUP */
/*
* Don't release scan->rs_cbuf at this point, because
* heapgettup doesn't increase PrivateRefCount if it is
@ -966,11 +969,11 @@ heap_fetch(Relation relation,
HeapTuple tuple,
Buffer *userbuf)
{
ItemId lp;
Buffer buffer;
PageHeader dp;
ItemPointer tid = &(tuple->t_self);
OffsetNumber offnum;
ItemId lp;
Buffer buffer;
PageHeader dp;
ItemPointer tid = &(tuple->t_self);
OffsetNumber offnum;
AssertMacro(PointerIsValid(userbuf)); /* see comments above */
@ -1093,9 +1096,7 @@ heap_insert(Relation relation, HeapTuple tup)
RelationPutHeapTupleAtEnd(relation, tup);
if (IsSystemRelationName(RelationGetRelationName(relation)->data))
{
RelationInvalidateHeapTuple(relation, tup);
}
return tup->t_data->t_oid;
}
@ -1106,11 +1107,11 @@ heap_insert(Relation relation, HeapTuple tup)
int
heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid)
{
ItemId lp;
HeapTupleData tp;
PageHeader dp;
Buffer buffer;
int result;
ItemId lp;
HeapTupleData tp;
PageHeader dp;
Buffer buffer;
int result;
/* increment access statistics */
IncrHeapAccessStat(local_delete);
@ -1130,10 +1131,10 @@ heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid)
tp.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
tp.t_len = ItemIdGetLength(lp);
tp.t_self = *tid;
l1:
result = HeapTupleSatisfiesUpdate(&tp);
if (result == HeapTupleInvisible)
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1142,7 +1143,7 @@ l1:
}
else if (result == HeapTupleBeingUpdated)
{
TransactionId xwait = tp.t_data->t_xmax;
TransactionId xwait = tp.t_data->t_xmax;
/* sleep until concurrent transaction ends */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1177,8 +1178,8 @@ l1:
/* store transaction information of xact deleting the tuple */
TransactionIdStore(GetCurrentTransactionId(), &(tp.t_data->t_xmax));
tp.t_data->t_cmax = GetCurrentCommandId();
tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1194,14 +1195,14 @@ l1:
* heap_replace - replace a tuple
*/
int
heap_replace(Relation relation, ItemPointer otid, HeapTuple newtup,
ItemPointer ctid)
heap_replace(Relation relation, ItemPointer otid, HeapTuple newtup,
ItemPointer ctid)
{
ItemId lp;
HeapTupleData oldtup;
PageHeader dp;
Buffer buffer;
int result;
ItemId lp;
HeapTupleData oldtup;
PageHeader dp;
Buffer buffer;
int result;
/* increment access statistics */
IncrHeapAccessStat(local_replace);
@ -1223,7 +1224,7 @@ heap_replace(Relation relation, ItemPointer otid, HeapTuple newtup,
l2:
result = HeapTupleSatisfiesUpdate(&oldtup);
if (result == HeapTupleInvisible)
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1232,7 +1233,7 @@ l2:
}
else if (result == HeapTupleBeingUpdated)
{
TransactionId xwait = oldtup.t_data->t_xmax;
TransactionId xwait = oldtup.t_data->t_xmax;
/* sleep until concurrent transaction ends */
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1275,19 +1276,20 @@ l2:
/* logically delete old item */
TransactionIdStore(GetCurrentTransactionId(), &(oldtup.t_data->t_xmax));
oldtup.t_data->t_cmax = GetCurrentCommandId();
oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
/* insert new item */
if ((unsigned) DOUBLEALIGN(newtup->t_len) <= PageGetFreeSpace((Page) dp))
RelationPutHeapTuple(relation, buffer, newtup);
else
{
/*
* New item won't fit on same page as old item, have to look
* for a new place to put it. Note that we have to unlock
* current buffer context - not good but RelationPutHeapTupleAtEnd
* uses extend lock.
* New item won't fit on same page as old item, have to look for a
* new place to put it. Note that we have to unlock current buffer
* context - not good but RelationPutHeapTupleAtEnd uses extend
* lock.
*/
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
RelationPutHeapTupleAtEnd(relation, newtup);
@ -1295,8 +1297,8 @@ l2:
}
/*
* New item in place, now record address of new tuple in
* t_ctid of old one.
* New item in place, now record address of new tuple in t_ctid of old
* one.
*/
oldtup.t_data->t_ctid = newtup->t_self;
@ -1316,10 +1318,10 @@ l2:
int
heap_mark4update(Relation relation, HeapTuple tuple, Buffer *buffer)
{
ItemPointer tid = &(tuple->t_self);
ItemId lp;
PageHeader dp;
int result;
ItemPointer tid = &(tuple->t_self);
ItemId lp;
PageHeader dp;
int result;
/* increment access statistics */
IncrHeapAccessStat(local_mark4update);
@ -1336,10 +1338,10 @@ heap_mark4update(Relation relation, HeapTuple tuple, Buffer *buffer)
lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid));
tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
tuple->t_len = ItemIdGetLength(lp);
l3:
result = HeapTupleSatisfiesUpdate(tuple);
if (result == HeapTupleInvisible)
{
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
@ -1348,7 +1350,7 @@ l3:
}
else if (result == HeapTupleBeingUpdated)
{
TransactionId xwait = tuple->t_data->t_xmax;
TransactionId xwait = tuple->t_data->t_xmax;
/* sleep until concurrent transaction ends */
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
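heap_delete, heap_replace and heap_mark4update in the hunks above share one shape: evaluate HeapTupleSatisfiesUpdate, and if the tuple is being updated by another transaction, drop the buffer lock, sleep until that transaction ends, then jump back to the label and re-check. The sketch below is only a schematic of that wait-and-retry shape; check_tuple and wait_for_other_xact are invented stand-ins, not backend functions.

#include <stdio.h>

typedef enum
{
    UPDATE_OK,                  /* we may update the tuple */
    UPDATE_BEING_UPDATED        /* another transaction still holds it */
} UpdateStatus;

/* stand-in for HeapTupleSatisfiesUpdate: pretend it is busy a few times */
static UpdateStatus
check_tuple(int *busy_rounds)
{
    return (*busy_rounds)-- > 0 ? UPDATE_BEING_UPDATED : UPDATE_OK;
}

/* stand-in for waiting on the concurrent transaction */
static void
wait_for_other_xact(void)
{
    printf("waiting for concurrent transaction to end...\n");
}

int
main()
{
    int         busy_rounds = 2;
    UpdateStatus st;

retry:
    st = check_tuple(&busy_rounds);
    if (st == UPDATE_BEING_UPDATED)
    {
        /* release our lock, sleep until the other xact ends, re-check */
        wait_for_other_xact();
        goto retry;
    }
    printf("tuple is ours to update\n");
    return 0;
}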
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Id: hio.c,v 1.19 1999/05/07 01:22:53 vadim Exp $
* $Id: hio.c,v 1.20 1999/05/25 16:07:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,11 +39,11 @@ RelationPutHeapTuple(Relation relation,
Buffer buffer,
HeapTuple tuple)
{
Page pageHeader;
OffsetNumber offnum;
unsigned int len;
ItemId itemId;
Item item;
Page pageHeader;
OffsetNumber offnum;
unsigned int len;
ItemId itemId;
Item item;
/* ----------------
* increment access statistics
@ -62,13 +62,13 @@ RelationPutHeapTuple(Relation relation,
itemId = PageGetItemId((Page) pageHeader, offnum);
item = PageGetItem((Page) pageHeader, itemId);
ItemPointerSet(&((HeapTupleHeader) item)->t_ctid,
BufferGetBlockNumber(buffer), offnum);
ItemPointerSet(&((HeapTupleHeader) item)->t_ctid,
BufferGetBlockNumber(buffer), offnum);
/*
* Let the caller do this!
*
WriteBuffer(buffer);
* WriteBuffer(buffer);
*/
/* return an accurate tuple */
@ -111,8 +111,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
Item item;
/*
* Lock relation for extention. We can use LockPage here as long as
* in all other places we use page-level locking for indices only.
* Lock relation for extension. We can use LockPage here as long as in
* all other places we use page-level locking for indices only.
* Alternatively, we could define pseudo-table as we do for
* transactions with XactLockTable.
*/
@ -132,6 +132,7 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
{
buffer = ReadBuffer(relation, lastblock);
pageHeader = (Page) BufferGetPage(buffer);
/*
* There was IF instead of ASSERT here ?!
*/
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.16 1999/02/13 23:14:29 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.17 1999/05/25 16:07:12 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@ -270,5 +270,5 @@ IndexScanRestorePosition(IndexScanDesc scan)
scan->flags = 0x0; /* XXX should have a symbolic name */
}
#endif
#endif
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.31 1999/02/13 23:14:30 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.32 1999/05/25 16:07:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -57,7 +57,7 @@ static bool StrategyTermIsValid(StrategyTerm term,
*/
/*
* StrategyMapGetScanKeyEntry
* StrategyMapGetScanKeyEntry
* Returns a scan key entry of a index strategy mapping member.
*
* Note:
@ -75,7 +75,7 @@ StrategyMapGetScanKeyEntry(StrategyMap map,
}
/*
* IndexStrategyGetStrategyMap
* IndexStrategyGetStrategyMap
* Returns an index strategy mapping of an index strategy.
*
* Note:
@ -97,7 +97,7 @@ IndexStrategyGetStrategyMap(IndexStrategy indexStrategy,
}
/*
* AttributeNumberGetIndexStrategySize
* AttributeNumberGetIndexStrategySize
* Computes the size of an index strategy.
*/
Size
@ -294,8 +294,8 @@ RelationGetStrategy(Relation relation,
Assert(RegProcedureIsValid(procedure));
strategyMap = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
evaluation->maxStrategy,
attributeNumber);
evaluation->maxStrategy,
attributeNumber);
/* get a strategy number for the procedure ignoring flags for now */
for (index = 0; index < evaluation->maxStrategy; index += 1)
@ -526,7 +526,7 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
/*
* IndexSupportInitialize
* IndexSupportInitialize
* Initializes an index strategy and associated support procedures.
*/
void
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.22 1999/03/14 05:08:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.23 1999/05/25 16:07:21 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@ -40,7 +40,7 @@ btint4cmp(int32 a, int32 b)
}
int32
btint8cmp(int64 *a, int64 *b)
btint8cmp(int64 * a, int64 * b)
{
if (*a > *b)
return 1;
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.39 1999/05/01 16:09:45 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.40 1999/05/25 16:07:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -99,13 +99,13 @@ l1:
/* key on the page before trying to compare it */
if (!PageIsEmpty(page) && offset <= maxoff)
{
TupleDesc itupdesc;
BTItem cbti;
HeapTupleData htup;
BTPageOpaque opaque;
Buffer nbuf;
BlockNumber blkno;
bool chtup = true;
TupleDesc itupdesc;
BTItem cbti;
HeapTupleData htup;
BTPageOpaque opaque;
Buffer nbuf;
BlockNumber blkno;
bool chtup = true;
itupdesc = RelationGetDescr(rel);
nbuf = InvalidBuffer;
@ -122,15 +122,16 @@ l1:
*/
while (_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
{ /* they're equal */
/*
* Have to check is inserted heap tuple deleted one
* (i.e. just moved to another place by vacuum)!
* Have to check whether the inserted heap tuple has been deleted
* (i.e. just moved to another place by vacuum)!
*/
if (chtup)
{
htup.t_self = btitem->bti_itup.t_tid;
heap_fetch(heapRel, SnapshotDirty, &htup, &buffer);
if (htup.t_data == NULL) /* YES! */
if (htup.t_data == NULL) /* YES! */
break;
/* Live tuple was inserted */
ReleaseBuffer(buffer);
@ -139,11 +140,11 @@ l1:
cbti = (BTItem) PageGetItem(page, PageGetItemId(page, offset));
htup.t_self = cbti->bti_itup.t_tid;
heap_fetch(heapRel, SnapshotDirty, &htup, &buffer);
if (htup.t_data != NULL) /* it is a duplicate */
if (htup.t_data != NULL) /* it is a duplicate */
{
TransactionId xwait =
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;
TransactionId xwait =
(TransactionIdIsValid(SnapshotDirty->xmin)) ?
SnapshotDirty->xmin : SnapshotDirty->xmax;
/*
* If this tuple is being updated by other transaction
@ -156,7 +157,7 @@ l1:
_bt_relbuf(rel, nbuf, BT_READ);
_bt_relbuf(rel, buf, BT_WRITE);
XactLockTableWait(xwait);
goto l1; /* continue from the begin */
goto l1;/* continue from the begin */
}
elog(ERROR, "Cannot insert a duplicate key into a unique index");
}
@ -571,10 +572,10 @@ _bt_insertonpg(Relation rel,
* reasoning).
*/
l_spl:;
l_spl: ;
if (stack == (BTStack) NULL)
{
if (!is_root) /* if this page was not root page */
if (!is_root) /* if this page was not root page */
{
elog(DEBUG, "btree: concurrent ROOT page split");
stack = (BTStack) palloc(sizeof(BTStackData));
@ -1144,8 +1145,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
lpage = BufferGetPage(lbuf);
rpage = BufferGetPage(rbuf);
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
rootbknum;
/*
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.20 1999/04/22 08:19:59 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.21 1999/05/25 16:07:26 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@ -421,7 +421,7 @@ _bt_pageinit(Page page, Size size)
MemSet(page, 0, size);
PageInit(page, size, sizeof(BTPageOpaqueData));
((BTPageOpaque) PageGetSpecialPointer(page))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(page))->btpo_parent =
InvalidBlockNumber;
}
@ -494,17 +494,16 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
maxoff = PageGetMaxOffsetNumber(page);
if (stack->bts_offset == InvalidOffsetNumber ||
if (stack->bts_offset == InvalidOffsetNumber ||
maxoff >= stack->bts_offset)
{
/*
* _bt_insertonpg set bts_offset to InvalidOffsetNumber
* in the case of concurrent ROOT page split
* _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
* case of concurrent ROOT page split
*/
if (stack->bts_offset == InvalidOffsetNumber)
{
i = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
}
else
{
itemid = PageGetItemId(page, stack->bts_offset);
@ -524,7 +523,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
}
/* if the item has just moved right on this page, we're done */
for ( ;
for (;
i <= maxoff;
i = OffsetNumberNext(i))
{
View File
@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.37 1999/03/28 20:31:58 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.38 1999/05/25 16:07:27 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@ -367,7 +367,7 @@ btinsert(Relation rel, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation
btitem = _bt_formitem(itup);
res = _bt_doinsert(rel, btitem,
IndexIsUnique(RelationGetRelid(rel)), heapRel);
IndexIsUnique(RelationGetRelid(rel)), heapRel);
pfree(btitem);
pfree(itup);
@ -391,9 +391,10 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
/*
* Restore scan position using heap TID returned
* by previous call to btgettuple().
* Restore scan position using heap TID returned by previous call
* to btgettuple().
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@ -623,16 +624,15 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
* We use this as flag when first index tuple on page
* is deleted but we do not move left (this would
* slowdown vacuum) - so we set current->ip_posid
* before first index tuple on the current page
* We use this as a flag when the first index tuple on a page is deleted but
* we do not move left (this would slow down vacuum) - so we set
* current->ip_posid before the first index tuple on the current page
* (_bt_step will move it right)...
*/
if (!ItemPointerIsValid(&target))
{
ItemPointerSetOffsetNumber(&(scan->currentItemData),
OffsetNumberPrev(P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY));
ItemPointerSetOffsetNumber(&(scan->currentItemData),
OffsetNumberPrev(P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY));
return;
}
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.20 1999/03/28 20:31:58 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.21 1999/05/25 16:07:29 momjian Exp $
*
*
* NOTES
@ -112,12 +112,12 @@ _bt_adjscans(Relation rel, ItemPointer tid)
static void
_bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
{
ItemPointer current;
Buffer buf;
BTScanOpaque so;
OffsetNumber start;
Page page;
BTPageOpaque opaque;
ItemPointer current;
Buffer buf;
BTScanOpaque so;
OffsetNumber start;
Page page;
BTPageOpaque opaque;
so = (BTScanOpaque) scan->opaque;
buf = so->btso_curbuf;
@ -140,7 +140,7 @@ _bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
{
Page pg = BufferGetPage(buf);
BTItem btitem = (BTItem) PageGetItem(pg,
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
so->curHeapIptr = btitem->bti_itup.t_tid;
}
@ -181,7 +181,7 @@ _bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno)
{
Page pg = BufferGetPage(buf);
BTItem btitem = (BTItem) PageGetItem(pg,
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
PageGetItemId(pg, ItemPointerGetOffsetNumber(current)));
so->mrkHeapIptr = btitem->bti_itup.t_tid;
}
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.43 1999/04/13 17:18:28 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.44 1999/05/25 16:07:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -706,7 +706,7 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
so = (BTScanOpaque) scan->opaque;
current = &(scan->currentItemData);
Assert (BufferIsValid(so->btso_curbuf));
Assert(BufferIsValid(so->btso_curbuf));
/* we still have the buffer pinned and locked */
buf = so->btso_curbuf;
@ -733,8 +733,8 @@ _bt_next(IndexScanDesc scan, ScanDirection dir)
return res;
}
} while (keysok >= so->numberOfFirstKeys ||
(keysok == -1 && ScanDirectionIsBackward(dir)));
} while (keysok >= so->numberOfFirstKeys ||
(keysok == -1 && ScanDirectionIsBackward(dir)));
ItemPointerSetInvalid(current);
so->btso_curbuf = InvalidBuffer;
@ -776,8 +776,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
BTScanOpaque so;
ScanKeyData skdata;
Size keysok;
int i;
int nKeyIndex = -1;
int i;
int nKeyIndex = -1;
rel = scan->relation;
so = (BTScanOpaque) scan->opaque;
@ -795,27 +795,27 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (ScanDirectionIsBackward(dir))
{
for (i=0; i<so->numberOfKeys; i++)
for (i = 0; i < so->numberOfKeys; i++)
{
if (so->keyData[i].sk_attno != 1)
break;
strat = _bt_getstrat(rel, so->keyData[i].sk_attno,
so->keyData[i].sk_procedure);
strat = _bt_getstrat(rel, so->keyData[i].sk_attno,
so->keyData[i].sk_procedure);
if (strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber||
strat == BTEqualStrategyNumber)
strat == BTLessEqualStrategyNumber ||
strat == BTEqualStrategyNumber)
{
nKeyIndex = i;
break;
}
}
}
else
else
{
strat = _bt_getstrat(rel, 1, so->keyData[0].sk_procedure);
if (strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber)
strat == BTLessEqualStrategyNumber)
;
else
nKeyIndex = 0;
@ -850,7 +850,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
proc = index_getprocid(rel, 1, BTORDER_PROC);
ScanKeyEntryInitialize(&skdata, so->keyData[nKeyIndex].sk_flags,
1, proc, so->keyData[nKeyIndex].sk_argument);
1, proc, so->keyData[nKeyIndex].sk_argument);
stack = _bt_search(rel, 1, &skdata, &buf);
_bt_freestack(stack);
@ -1104,9 +1104,10 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
rel = scan->relation;
current = &(scan->currentItemData);
/*
* Don't use ItemPointerGetOffsetNumber or you risk to get
* assertion due to ability of ip_posid to be equal 0.
* Don't use ItemPointerGetOffsetNumber or you risk an assertion
* failure, because ip_posid can be 0.
*/
offnum = current->ip_posid;
page = BufferGetPage(*bufP);
View File
@ -5,7 +5,7 @@
*
*
* IDENTIFICATION
* $Id: nbtsort.c,v 1.38 1999/05/09 00:53:19 tgl Exp $
* $Id: nbtsort.c,v 1.39 1999/05/25 16:07:34 momjian Exp $
*
* NOTES
*
@ -552,16 +552,16 @@ _bt_spoolinit(Relation index, int ntapes, bool isunique)
btspool->bts_tape = 0;
btspool->isunique = isunique;
btspool->bts_itape =(BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_otape =(BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_itape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
btspool->bts_otape = (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes);
if (btspool->bts_itape == (BTTapeBlock **) NULL ||
btspool->bts_otape == (BTTapeBlock **) NULL)
elog(ERROR, "_bt_spoolinit: out of memory");
for (i = 0; i < ntapes; ++i)
{
btspool->bts_itape[i] = _bt_tapecreate();
btspool->bts_otape[i] = _bt_tapecreate();
btspool->bts_itape[i] = _bt_tapecreate();
btspool->bts_otape[i] = _bt_tapecreate();
}
_bt_isortcmpinit(index, btspool);
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.31 1999/02/13 23:14:42 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.32 1999/05/25 16:07:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -307,7 +307,7 @@ rtinsert(Relation r, Datum *datum, char *nulls, ItemPointer ht_ctid, Relation he
/*
* Notes in ExecUtils:ExecOpenIndices()
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
res = rtdoinsert(r, itup, &rtState);
@ -947,10 +947,10 @@ rtdelete(Relation r, ItemPointer tid)
Page page;
/*
* Notes in ExecUtils:ExecOpenIndices()
* Also note that only vacuum deletes index tuples now...
* Notes in ExecUtils:ExecOpenIndices() Also note that only vacuum
* deletes index tuples now...
*
RelationSetLockForWrite(r);
* RelationSetLockForWrite(r);
*/
blkno = ItemPointerGetBlockNumber(tid);
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.22 1999/02/13 23:14:43 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.23 1999/05/25 16:07:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -69,7 +69,7 @@ rtbeginscan(Relation r,
/*
* Let index_beginscan do its work...
*
RelationSetLockForRead(r);
* RelationSetLockForRead(r);
*/
s = RelationGetIndexScan(r, fromEnd, nkeys, key);
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.25 1999/03/30 01:37:21 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.26 1999/05/25 16:07:45 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@ -221,7 +221,7 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */
/*
* update (invalidate) our single item TransactionLogTest cache.
*
if (status != XID_COMMIT)
* if (status != XID_COMMIT)
*
* What the hell?! Why != XID_COMMIT?!
*/
@ -374,7 +374,7 @@ TransRecover(Relation logRelation)
*/
/*
* InitializeTransactionLog
* InitializeTransactionLog
* Initializes transaction logging.
*/
void
@ -484,7 +484,7 @@ InitializeTransactionLog(void)
*/
/*
* TransactionIdDidCommit
* TransactionIdDidCommit
* True iff transaction associated with the identifier did commit.
*
* Note:
@ -500,7 +500,7 @@ TransactionIdDidCommit(TransactionId transactionId)
}
/*
* TransactionIdDidAborted
* TransactionIdDidAborted
* True iff transaction associated with the identifier did abort.
*
* Note:
@ -541,7 +541,7 @@ TransactionIdIsInProgress(TransactionId transactionId)
*/
/*
* TransactionIdCommit
* TransactionIdCommit
* Commits the transaction associated with the identifier.
*
* Note:
@ -557,7 +557,7 @@ TransactionIdCommit(TransactionId transactionId)
}
/*
* TransactionIdAbort
* TransactionIdAbort
* Aborts the transaction associated with the identifier.
*
* Note:
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.19 1999/02/13 23:14:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.20 1999/05/25 16:07:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -260,7 +260,7 @@ VariableRelationPutNextOid(Oid *oidP)
* In the version 2 transaction system, transaction id's are
* restricted in several ways.
*
* -- Old comments removed
* -- Old comments removed
*
* Second, since we may someday perform compression of the data
* in the log and time relations, we cause the numbering of the
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.35 1999/05/13 00:34:57 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.36 1999/05/25 16:07:50 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@ -194,8 +194,8 @@ TransactionStateData CurrentTransactionStateData = {
TransactionState CurrentTransactionState = &CurrentTransactionStateData;
int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
/* ----------------
* info returned when the system is disabled
@ -299,6 +299,7 @@ IsTransactionState(void)
*/
return false;
}
#endif
/* --------------------------------
@ -516,7 +517,7 @@ CommandCounterIncrement()
AtStart_Cache();
TransactionIdFlushCache();
}
void
@ -695,9 +696,9 @@ AtCommit_Memory()
/* ----------------
* Release memory in the blank portal.
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is probably not necessary, but seems like a good idea...)
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is probably not necessary, but seems like a good idea...)
* ----------------
*/
portal = GetPortalByName(NULL);
@ -789,9 +790,9 @@ AtAbort_Memory()
/* ----------------
* Release memory in the blank portal.
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is ESSENTIAL in case we aborted from someplace where it wasn't.)
* Since EndPortalAllocMode implicitly works on the current context,
* first make real sure that the blank portal is the selected context.
* (This is ESSENTIAL in case we aborted from someplace where it wasn't.)
* ----------------
*/
portal = GetPortalByName(NULL);
@ -1074,7 +1075,7 @@ StartTransactionCommand()
break;
/* ----------------
* As with BEGIN, we should never experience this
* As with BEGIN, we should never experience this
* if we do it means the END state was not changed in the
* previous CommitTransactionCommand(). If we get it, we
* print a warning, commit the transaction, start a new
@ -1509,6 +1510,7 @@ AbortOutOfAnyTransaction()
*/
if (s->state != TRANS_DEFAULT)
AbortTransaction();
/*
* Now reset the high-level state
*/
View File
@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: xid.c,v 1.21 1999/02/13 23:14:49 momjian Exp $
* $Id: xid.c,v 1.22 1999/05/25 16:07:52 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING
View File
@ -7,7 +7,7 @@
* Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.59 1999/05/10 00:44:52 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.60 1999/05/25 16:07:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -182,7 +182,7 @@ static char *relname; /* current relation name */
Form_pg_attribute attrtypes[MAXATTR]; /* points to attribute info */
static char *values[MAXATTR]; /* corresponding attribute values */
int numattr; /* number of attributes for cur. rel */
extern bool disableFsync; /* do not fsync the database */
extern bool disableFsync; /* do not fsync the database */
int DebugMode;
static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem
@ -587,7 +587,9 @@ DefineAttr(char *name, char *type, int attnum)
printf("<%s %s> ", attrtypes[attnum]->attname.data, type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attlen = attrtypes[attnum]->attlen = Procid[typeoid].len;
/* Cheat like mad to fill in these items from the length only.
/*
* Cheat like mad to fill in these items from the length only.
* This only has to work for types used in the system catalogs...
*/
switch (attlen)
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.20 1999/02/13 23:14:55 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.21 1999/05/25 16:08:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -29,8 +29,8 @@
char *
relpath(char *relname)
{
char *path;
int bufsize = 0;
char *path;
int bufsize = 0;
if (IsSharedSystemRelationName(relname))
{
@ -43,7 +43,7 @@ relpath(char *relname)
}
/*
* IsSystemRelationName
* IsSystemRelationName
* True iff name is the name of a system catalog relation.
*
* We now make a new requirement where system catalog relns must begin
@ -64,7 +64,7 @@ IsSystemRelationName(char *relname)
}
/*
* IsSharedSystemRelationName
* IsSharedSystemRelationName
* True iff name is the name of a shared system catalog relation.
*/
bool
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.84 1999/05/22 04:12:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.85 1999/05/25 16:08:03 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -70,8 +70,8 @@
#endif
static void AddNewRelationTuple(Relation pg_class_desc,
Relation new_rel_desc, Oid new_rel_oid, unsigned natts,
char relkind, char *temp_relname);
Relation new_rel_desc, Oid new_rel_oid, unsigned natts,
char relkind, char *temp_relname);
static void AddToNoNameRelList(Relation r);
static void DeleteAttributeTuples(Relation rel);
static void DeleteRelationTuple(Relation rel);
@ -185,7 +185,7 @@ heap_create(char *relname,
bool nailme = false;
int natts = tupDesc->natts;
static unsigned int uniqueId = 0;
extern GlobalMemory CacheCxt;
MemoryContext oldcxt;
@ -240,23 +240,21 @@ heap_create(char *relname,
nailme = true;
}
else
{
relid = newoid();
}
if (isnoname)
{
Assert(!relname);
relname = palloc(NAMEDATALEN);
snprintf(relname, NAMEDATALEN, "pg_noname.%d.%u",
(int) MyProcPid, uniqueId++);
(int) MyProcPid, uniqueId++);
}
if (istemp)
{
/* replace relname of caller */
snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
(int) MyProcPid, uniqueId++);
(int) MyProcPid, uniqueId++);
}
/* ----------------
@ -272,7 +270,7 @@ heap_create(char *relname,
/*
* create a new tuple descriptor from the one passed in
*/
*/
rel->rd_att = CreateTupleDescCopyConstr(tupDesc);
/* ----------------
@ -321,7 +319,7 @@ heap_create(char *relname,
* ----------------
*/
rel->rd_nonameunlinked = TRUE; /* change once table is created */
rel->rd_nonameunlinked = TRUE; /* change once table is created */
rel->rd_fd = (File) smgrcreate(DEFAULT_SMGR, rel);
rel->rd_nonameunlinked = FALSE;
@ -479,8 +477,8 @@ RelnameFindRelid(char *relname)
if (!IsBootstrapProcessingMode())
{
tuple = SearchSysCacheTuple(RELNAME,
PointerGetDatum(relname),
0, 0, 0);
PointerGetDatum(relname),
0, 0, 0);
if (HeapTupleIsValid(tuple))
relid = tuple->t_data->t_oid;
else
@ -488,10 +486,10 @@ RelnameFindRelid(char *relname)
}
else
{
Relation pg_class_desc;
Relation pg_class_desc;
ScanKeyData key;
HeapScanDesc pg_class_scan;
pg_class_desc = heap_openr(RelationRelationName);
/* ----------------
@ -504,7 +502,7 @@ RelnameFindRelid(char *relname)
(AttrNumber) Anum_pg_class_relname,
(RegProcedure) F_NAMEEQ,
(Datum) relname);
/* ----------------
* begin the scan
* ----------------
@ -514,14 +512,14 @@ RelnameFindRelid(char *relname)
SnapshotNow,
1,
&key);
/* ----------------
* get a tuple. if the tuple is NULL then it means we
* didn't find an existing relation.
* ----------------
*/
tuple = heap_getnext(pg_class_scan, 0);
if (HeapTupleIsValid(tuple))
relid = tuple->t_data->t_oid;
else
@ -594,7 +592,7 @@ AddNewAttributeTuples(Oid new_rel_oid,
(char *) *dpp);
heap_insert(rel, tup);
if (hasindex)
CatalogIndexInsert(idescs, Num_pg_attr_indices, rel, tup);
@ -643,11 +641,11 @@ AddNewAttributeTuples(Oid new_rel_oid,
*/
static void
AddNewRelationTuple(Relation pg_class_desc,
Relation new_rel_desc,
Oid new_rel_oid,
unsigned natts,
char relkind,
char *temp_relname)
Relation new_rel_desc,
Oid new_rel_oid,
unsigned natts,
char relkind,
char *temp_relname)
{
Form_pg_class new_rel_reltup;
HeapTuple tup;
@ -678,12 +676,12 @@ AddNewRelationTuple(Relation pg_class_desc,
* the table has been proven to be small by VACUUM or CREATE INDEX.
* (NOTE: if user does CREATE TABLE, then CREATE INDEX, then loads
* the table, he still loses until he vacuums, because CREATE INDEX
* will set reltuples to zero. Can't win 'em all. Maintaining the
* will set reltuples to zero. Can't win 'em all. Maintaining the
* stats on-the-fly would solve the problem, but the overhead of that
* would likely cost more than it'd save.)
* ----------------
*/
new_rel_reltup->relpages = 10; /* bogus estimates */
new_rel_reltup->relpages = 10; /* bogus estimates */
new_rel_reltup->reltuples = 1000;
new_rel_reltup->relowner = GetUserId();
@ -716,9 +714,10 @@ AddNewRelationTuple(Relation pg_class_desc,
if (temp_relname)
create_temp_relation(temp_relname, tup);
if (!isBootstrap)
{
/*
* First, open the catalog indices and insert index tuples for the
* new relation.
@ -730,7 +729,7 @@ AddNewRelationTuple(Relation pg_class_desc,
/* now restore processing mode */
SetProcessingMode(NormalProcessing);
}
pfree(tup);
}
@ -788,8 +787,8 @@ heap_create_with_catalog(char *relname,
Relation new_rel_desc;
Oid new_rel_oid;
int natts = tupdesc->natts;
char *temp_relname = NULL;
char *temp_relname = NULL;
/* ----------------
* sanity checks
* ----------------
@ -804,33 +803,34 @@ heap_create_with_catalog(char *relname,
/* temp tables can mask non-temp tables */
if ((!istemp && RelnameFindRelid(relname)) ||
(istemp && get_temp_rel_by_name(relname) != NULL))
(istemp && get_temp_rel_by_name(relname) != NULL))
elog(ERROR, "Relation '%s' already exists", relname);
/* invalidate cache so non-temp table is masked by temp */
if (istemp)
{
Oid relid = RelnameFindRelid(relname);
Oid relid = RelnameFindRelid(relname);
if (relid != InvalidOid)
{
/*
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
*/
RelationForgetRelation(relid);
ResetSystemCache();
}
}
}
/* save user relation name because heap_create changes it */
if (istemp)
{
temp_relname = pstrdup(relname); /* save original value */
temp_relname = pstrdup(relname); /* save original value */
relname = palloc(NAMEDATALEN);
strcpy(relname, temp_relname); /* heap_create will change this */
strcpy(relname, temp_relname); /* heap_create will change this */
}
/* ----------------
* ok, relation does not already exist so now we
* create an uncataloged relation and pull its relation oid
@ -838,7 +838,7 @@ heap_create_with_catalog(char *relname,
*
* Note: The call to heap_create() does all the "real" work
* of creating the disk file for the relation.
* This changes relname for noname and temp tables.
* This changes relname for noname and temp tables.
* ----------------
*/
new_rel_desc = heap_create(relname, tupdesc, false, istemp);
@ -866,11 +866,11 @@ heap_create_with_catalog(char *relname,
pg_class_desc = heap_openr(RelationRelationName);
AddNewRelationTuple(pg_class_desc,
new_rel_desc,
new_rel_oid,
natts,
relkind,
temp_relname);
new_rel_desc,
new_rel_oid,
natts,
relkind,
temp_relname);
StoreConstraints(new_rel_desc);
@ -1320,7 +1320,7 @@ heap_destroy_with_catalog(char *relname)
if (istemp)
remove_temp_relation(rid);
/* ----------------
* delete type tuple. here we want to see the effects
* of the deletions we just did, so we use setheapoverride().
@ -1334,7 +1334,7 @@ heap_destroy_with_catalog(char *relname)
* delete relation tuple
* ----------------
*/
/* must delete fake tuple in cache */
/* must delete fake tuple in cache */
DeleteRelationTuple(rel);
/*
@ -1516,10 +1516,12 @@ StoreAttrDefault(Relation rel, AttrDefault *attrdef)
extern GlobalMemory CacheCxt;
start:
/* Surround table name with double quotes to allow mixed-case and
/*
* Surround table name with double quotes to allow mixed-case and
* whitespaces in names. - BGA 1998-11-14
*/
snprintf(str, MAX_PARSE_BUFFER,
snprintf(str, MAX_PARSE_BUFFER,
"select %s%s from \"%.*s\"", attrdef->adsrc, cast,
NAMEDATALEN, rel->rd_rel->relname.data);
setheapoverride(true);
@ -1539,16 +1541,16 @@ start:
if (type != atp->atttypid)
{
if (IS_BINARY_COMPATIBLE(type, atp->atttypid))
; /* use without change */
; /* use without change */
else if (can_coerce_type(1, &(type), &(atp->atttypid)))
expr = coerce_type(NULL, (Node *)expr, type, atp->atttypid,
atp->atttypmod);
expr = coerce_type(NULL, (Node *) expr, type, atp->atttypid,
atp->atttypmod);
else if (IsA(expr, Const))
{
if (*cast != 0)
elog(ERROR, "DEFAULT clause const type '%s' mismatched with column type '%s'",
typeidTypeName(type), typeidTypeName(atp->atttypid));
snprintf(cast, 2*NAMEDATALEN, ":: %s", typeidTypeName(atp->atttypid));
snprintf(cast, 2 * NAMEDATALEN, ":: %s", typeidTypeName(atp->atttypid));
goto start;
}
else
@ -1598,12 +1600,13 @@ StoreRelCheck(Relation rel, ConstrCheck *check)
char nulls[4] = {' ', ' ', ' ', ' '};
extern GlobalMemory CacheCxt;
/* Check for table's existance. Surround table name with double-quotes
/*
* Check for table's existence. Surround table name with double-quotes
* to allow mixed-case and whitespace names. - thomas 1998-11-12
*/
snprintf(str, MAX_PARSE_BUFFER,
"select 1 from \"%.*s\" where %s",
NAMEDATALEN, rel->rd_rel->relname.data, check->ccsrc);
snprintf(str, MAX_PARSE_BUFFER,
"select 1 from \"%.*s\" where %s",
NAMEDATALEN, rel->rd_rel->relname.data, check->ccsrc);
setheapoverride(true);
planTree_list = pg_parse_and_plan(str, NULL, 0,
&queryTree_list, None, FALSE);
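StoreAttrDefault and StoreRelCheck above both build a throwaway SQL string with snprintf, double-quoting the relation name and bounding it with a %.*s width so NAMEDATALEN is respected. Shown in isolation, with made-up lengths and names rather than real backend data, the formatting trick is just:

#include <stdio.h>

#define NAMEDATALEN 32
#define MAX_PARSE_BUFFER 256

int
main()
{
    char        str[MAX_PARSE_BUFFER];
    const char *relname = "Mixed Case Table";
    const char *check = "price > 0";

    /* %.*s limits the identifier to NAMEDATALEN bytes; the double
     * quotes preserve mixed case and embedded spaces */
    snprintf(str, MAX_PARSE_BUFFER,
             "select 1 from \"%.*s\" where %s",
             NAMEDATALEN, relname, check);
    printf("%s\n", str);
    return 0;
}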
View File
@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.74 1999/05/17 00:27:45 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.75 1999/05/25 16:08:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@ -60,8 +60,8 @@
#define NTUPLES_PER_PAGE(natts) (BLCKSZ/((natts)*AVG_TUPLE_SIZE))
/* non-export function prototypes */
static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName,
bool istemp);
static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName,
bool istemp);
static TupleDesc BuildFuncTupleDesc(FuncIndexInfo *funcInfo);
static TupleDesc ConstructTupleDescriptor(Oid heapoid, Relation heapRelation,
List *attributeList,
@ -77,7 +77,7 @@ static void
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
FuncIndexInfo *funcInfo, int natts,
AttrNumber *attNums, Oid *classOids, Node *predicate,
List *attributeList, bool islossy, bool unique, bool primary);
List *attributeList, bool islossy, bool unique, bool primary);
static void DefaultBuild(Relation heapRelation, Relation indexRelation,
int numberOfAttributes, AttrNumber *attributeNumber,
IndexStrategy indexStrategy, uint16 parameterCount,
@ -126,11 +126,11 @@ GetHeapRelationOid(char *heapRelationName, char *indexRelationName, bool istemp)
Oid indoid;
Oid heapoid;
indoid = RelnameFindRelid(indexRelationName);
if ((!istemp && OidIsValid(indoid)) ||
(istemp && get_temp_rel_by_name(indexRelationName) != NULL))
(istemp && get_temp_rel_by_name(indexRelationName) != NULL))
elog(ERROR, "Cannot create index: '%s' already exists",
indexRelationName);
@ -139,7 +139,7 @@ GetHeapRelationOid(char *heapRelationName, char *indexRelationName, bool istemp)
if (!OidIsValid(heapoid))
elog(ERROR, "Cannot create index on '%s': relation does not exist",
heapRelationName);
return heapoid;
}
@ -356,7 +356,7 @@ ConstructTupleDescriptor(Oid heapoid,
}
/* ----------------------------------------------------------------
* AccessMethodObjectIdGetForm
* AccessMethodObjectIdGetForm
 * Returns the formatted access method tuple given its object identifier.
*
* XXX ADD INDEXING
@ -482,7 +482,7 @@ UpdateRelationRelation(Relation indexRelation, char *temp_relname)
if (temp_relname)
create_temp_relation(temp_relname, tuple);
/*
* During normal processing, we need to make sure that the system
* catalog indices are correct. Bootstrap (initdb) time doesn't
@ -571,7 +571,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
value[Anum_pg_attribute_attcacheoff - 1] = Int32GetDatum(-1);
init_tuple = heap_addheader(Natts_pg_attribute,
ATTRIBUTE_TUPLE_SIZE,
ATTRIBUTE_TUPLE_SIZE,
(char *) (indexRelation->rd_att->attrs[0]));
hasind = false;
@ -611,7 +611,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
*/
memmove(GETSTRUCT(cur_tuple),
(char *) indexTupDesc->attrs[i],
ATTRIBUTE_TUPLE_SIZE);
ATTRIBUTE_TUPLE_SIZE);
value[Anum_pg_attribute_attnum - 1] = Int16GetDatum(i + 1);
@ -657,7 +657,7 @@ UpdateIndexRelation(Oid indexoid,
List *attributeList,
bool islossy,
bool unique,
bool primary)
bool primary)
{
Form_pg_index indexForm;
IndexElem *IndexKey;
@ -686,7 +686,7 @@ UpdateIndexRelation(Oid indexoid,
predLen = VARSIZE(predText);
itupLen = predLen + sizeof(FormData_pg_index);
indexForm = (Form_pg_index) palloc(itupLen);
memset (indexForm, 0, sizeof(FormData_pg_index));
memset(indexForm, 0, sizeof(FormData_pg_index));
memmove((char *) &indexForm->indpred, (char *) predText, predLen);
@ -939,7 +939,7 @@ index_create(char *heapRelationName,
Node *predicate,
bool islossy,
bool unique,
bool primary)
bool primary)
{
Relation heapRelation;
Relation indexRelation;
@ -948,15 +948,15 @@ index_create(char *heapRelationName,
Oid indexoid;
PredInfo *predInfo;
bool istemp = (get_temp_rel_by_name(heapRelationName) != NULL);
char *temp_relname = NULL;
char *temp_relname = NULL;
/* ----------------
* check parameters
* ----------------
*/
if (numatts < 1)
elog(ERROR, "must index at least one attribute");
/* ----------------
* get heap relation oid and open the heap relation
* XXX ADD INDEXING
@ -987,25 +987,27 @@ index_create(char *heapRelationName,
/* invalidate cache so possible non-temp index is masked by temp */
if (istemp)
{
Oid relid = RelnameFindRelid(indexRelationName);
Oid relid = RelnameFindRelid(indexRelationName);
if (relid != InvalidOid)
{
/*
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
* This is heavy-handed, but appears necessary bjm 1999/02/01
* SystemCacheRelationFlushed(relid) is not enough either.
*/
RelationForgetRelation(relid);
ResetSystemCache();
}
}
/* save user relation name because heap_create changes it */
if (istemp)
{
temp_relname = pstrdup(indexRelationName); /* save original value */
temp_relname = pstrdup(indexRelationName); /* save original value */
indexRelationName = palloc(NAMEDATALEN);
strcpy(indexRelationName, temp_relname); /* heap_create will change this */
strcpy(indexRelationName, temp_relname); /* heap_create will
* change this */
}
/* ----------------
@ -1122,8 +1124,8 @@ index_destroy(Oid indexId)
Relation relationRelation;
Relation attributeRelation;
HeapTuple tuple;
int16 attnum;
int16 attnum;
Assert(OidIsValid(indexId));
/* Open now to obtain lock by referencing table? bjm */
@ -1166,7 +1168,7 @@ index_destroy(Oid indexId)
/* does something only if it is a temp index */
remove_temp_relation(indexId);
/* ----------------
* fix INDEX relation
* ----------------


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.37 1999/05/10 00:44:55 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.38 1999/05/25 16:08:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -46,15 +46,15 @@
*/
char *Name_pg_attr_indices[Num_pg_attr_indices] = {AttributeNameIndex,
AttributeNumIndex,
AttributeRelidIndex};
AttributeNumIndex,
AttributeRelidIndex};
char *Name_pg_proc_indices[Num_pg_proc_indices] = {ProcedureNameIndex,
ProcedureOidIndex,
ProcedureSrcIndex};
ProcedureOidIndex,
ProcedureSrcIndex};
char *Name_pg_type_indices[Num_pg_type_indices] = {TypeNameIndex,
TypeOidIndex};
TypeOidIndex};
char *Name_pg_class_indices[Num_pg_class_indices] = {ClassNameIndex,
ClassOidIndex};
ClassOidIndex};
char *Name_pg_attrdef_indices[Num_pg_attrdef_indices] = {AttrDefaultIndex};
char *Name_pg_relcheck_indices[Num_pg_relcheck_indices] = {RelCheckIndex};
@ -63,9 +63,9 @@ char *Name_pg_trigger_indices[Num_pg_trigger_indices] = {TriggerRelidIndex};
static HeapTuple CatalogIndexFetchTuple(Relation heapRelation,
Relation idesc,
ScanKey skey,
int16 num_keys);
Relation idesc,
ScanKey skey,
int16 num_keys);
/*
@ -126,13 +126,13 @@ CatalogIndexInsert(Relation *idescs,
index_tup = SearchSysCacheTupleCopy(INDEXRELID,
ObjectIdGetDatum(idescs[i]->rd_id),
0, 0, 0);
0, 0, 0);
Assert(index_tup);
index_form = (Form_pg_index) GETSTRUCT(index_tup);
if (index_form->indproc != InvalidOid)
{
int fatts;
int fatts;
/*
* Compute the number of attributes we are indexing upon.
@ -152,7 +152,7 @@ CatalogIndexInsert(Relation *idescs,
natts = RelationGetDescr(idescs[i])->natts;
finfoP = (FuncIndexInfo *) NULL;
}
FormIndexDatum(natts,
(AttrNumber *) index_form->indkey,
heapTuple,
@ -229,11 +229,11 @@ CatalogIndexFetchTuple(Relation heapRelation,
ScanKey skey,
int16 num_keys)
{
IndexScanDesc sd;
IndexScanDesc sd;
RetrieveIndexResult indexRes;
HeapTupleData tuple;
HeapTuple result = NULL;
Buffer buffer;
HeapTupleData tuple;
HeapTuple result = NULL;
Buffer buffer;
sd = index_beginscan(idesc, false, num_keys, skey);
tuple.t_data = NULL;
@ -462,7 +462,7 @@ ClassNameIndexScan(Relation heapRelation, char *relName)
*/
if ((tuple = get_temp_rel_by_name(relName)) != NULL)
return heap_copytuple(tuple);
ScanKeyEntryInitialize(&skey[0],
(bits16) 0x0,
(AttrNumber) 1,


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.36 1999/05/10 00:44:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.37 1999/05/25 16:08:09 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@ -36,15 +36,15 @@
#endif
static Oid OperatorGetWithOpenRelation(Relation pg_operator_desc,
const char *operatorName,
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
const char *operatorName,
Oid leftObjectId,
Oid rightObjectId,
bool *defined);
static Oid OperatorGet(char *operatorName,
char *leftTypeName,
char *rightTypeName,
bool *defined);
char *leftTypeName,
char *rightTypeName,
bool *defined);
static Oid OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
char *operatorName,
@ -135,6 +135,7 @@ OperatorGetWithOpenRelation(Relation pg_operator_desc,
if (HeapTupleIsValid(tup))
{
regproc oprcode = ((Form_pg_operator) GETSTRUCT(tup))->oprcode;
operatorObjectId = tup->t_data->t_oid;
*defined = RegProcedureIsValid(oprcode);
}
@ -259,7 +260,7 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
/* ----------------
* initialize *values with the operator name and input data types.
* Note that oprcode is set to InvalidOid, indicating it's a shell.
* Note that oprcode is set to InvalidOid, indicating it's a shell.
* ----------------
*/
i = 0;
@ -356,9 +357,9 @@ OperatorShellMake(char *operatorName,
* ----------------
*/
operatorObjectId = OperatorShellMakeWithOpenRelation(pg_operator_desc,
operatorName,
leftObjectId,
rightObjectId);
operatorName,
leftObjectId,
rightObjectId);
/* ----------------
* close the operator relation and return the oid.
* ----------------
@ -506,8 +507,9 @@ OperatorDef(char *operatorName,
elog(ERROR, "OperatorDef: operator \"%s\" already defined",
operatorName);
/* At this point, if operatorObjectId is not InvalidOid then
* we are filling in a previously-created shell.
/*
* At this point, if operatorObjectId is not InvalidOid then we are
* filling in a previously-created shell.
*/
/* ----------------
@ -580,7 +582,7 @@ OperatorDef(char *operatorName,
values[Anum_pg_operator_oprcode - 1] = ObjectIdGetDatum(tup->t_data->t_oid);
values[Anum_pg_operator_oprresult - 1] = ObjectIdGetDatum(((Form_pg_proc)
GETSTRUCT(tup))->prorettype);
GETSTRUCT(tup))->prorettype);
/* ----------------
* find restriction
@ -648,7 +650,8 @@ OperatorDef(char *operatorName,
values[i++] = ObjectIdGetDatum(leftTypeId);
values[i++] = ObjectIdGetDatum(rightTypeId);
++i; /* Skip "oprresult", it was filled in above */
++i; /* Skip "oprresult", it was filled in
* above */
/*
* Set up the other operators. If they do not currently exist, create
@ -663,16 +666,16 @@ OperatorDef(char *operatorName,
{
if (name[j])
{
char *otherLeftTypeName = NULL;
char *otherRightTypeName = NULL;
Oid otherLeftTypeId = InvalidOid;
Oid otherRightTypeId = InvalidOid;
Oid other_oid = InvalidOid;
bool otherDefined = false;
char *otherLeftTypeName = NULL;
char *otherRightTypeName = NULL;
Oid otherLeftTypeId = InvalidOid;
Oid otherRightTypeId = InvalidOid;
Oid other_oid = InvalidOid;
bool otherDefined = false;
switch (j)
{
case 0: /* commutator has reversed arg types */
case 0: /* commutator has reversed arg types */
otherLeftTypeName = rightTypeName;
otherRightTypeName = leftTypeName;
otherLeftTypeId = rightTypeId;
@ -683,7 +686,7 @@ OperatorDef(char *operatorName,
&otherDefined);
commutatorId = other_oid;
break;
case 1: /* negator has same arg types */
case 1: /* negator has same arg types */
otherLeftTypeName = leftTypeName;
otherRightTypeName = rightTypeName;
otherLeftTypeId = leftTypeId;
@ -694,7 +697,7 @@ OperatorDef(char *operatorName,
&otherDefined);
negatorId = other_oid;
break;
case 2: /* left sort op takes left-side data type */
case 2: /* left sort op takes left-side data type */
otherLeftTypeName = leftTypeName;
otherRightTypeName = leftTypeName;
otherLeftTypeId = leftTypeId;
@ -704,7 +707,8 @@ OperatorDef(char *operatorName,
otherRightTypeName,
&otherDefined);
break;
case 3: /* right sort op takes right-side data type */
case 3: /* right sort op takes right-side data
* type */
otherLeftTypeName = rightTypeName;
otherRightTypeName = rightTypeName;
otherLeftTypeId = rightTypeId;
@ -737,8 +741,10 @@ OperatorDef(char *operatorName,
}
else
{
/* self-linkage to this operator; will fix below.
* Note that only self-linkage for commutation makes sense.
/*
* self-linkage to this operator; will fix below. Note
* that only self-linkage for commutation makes sense.
*/
if (j != 0)
elog(ERROR,
@ -804,15 +810,14 @@ OperatorDef(char *operatorName,
/*
* If a commutator and/or negator link is provided, update the other
* operator(s) to point at this one, if they don't already have a link.
* This supports an alternate style of operator definition wherein the
* user first defines one operator without giving negator or
* commutator, then defines the other operator of the pair with the
* proper commutator or negator attribute. That style doesn't require
* creation of a shell, and it's the only style that worked right before
* Postgres version 6.5.
* This code also takes care of the situation where the new operator
* is its own commutator.
* operator(s) to point at this one, if they don't already have a
* link. This supports an alternate style of operator definition
* wherein the user first defines one operator without giving negator
* or commutator, then defines the other operator of the pair with the
* proper commutator or negator attribute. That style doesn't require
* creation of a shell, and it's the only style that worked right
* before Postgres version 6.5. This code also takes care of the
* situation where the new operator is its own commutator.
*/
if (selfCommutator)
commutatorId = operatorObjectId;
@ -869,7 +874,8 @@ OperatorUpd(Oid baseId, Oid commId, Oid negId)
tup = heap_getnext(pg_operator_scan, 0);
/* if the commutator and negator are the same operator, do one update.
/*
* if the commutator and negator are the same operator, do one update.
* XXX this is probably useless code --- I doubt it ever makes sense
* for commutator and negator to be the same thing...
*/
@ -1008,7 +1014,7 @@ OperatorCreate(char *operatorName,
if (!leftTypeName && !rightTypeName)
elog(ERROR, "OperatorCreate: at least one of leftarg or rightarg must be defined");
if (! (leftTypeName && rightTypeName))
if (!(leftTypeName && rightTypeName))
{
/* If it's not a binary op, these things mustn't be set: */
if (commutatorName)


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.28 1999/05/13 07:28:27 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.29 1999/05/25 16:08:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -226,11 +226,11 @@ ProcedureCreate(char *procedureName,
* function name (the 'prosrc' value) is a known builtin function.
*
* NOTE: in Postgres versions before 6.5, the SQL name of the created
* function could not be different from the internal name, and 'prosrc'
* wasn't used. So there is code out there that does CREATE FUNCTION
* xyz AS '' LANGUAGE 'internal'. To preserve some modicum of
* backwards compatibility, accept an empty 'prosrc' value as meaning
* the supplied SQL function name.
* function could not be different from the internal name, and
* 'prosrc' wasn't used. So there is code out there that does CREATE
* FUNCTION xyz AS '' LANGUAGE 'internal'. To preserve some modicum
* of backwards compatibility, accept an empty 'prosrc' value as
* meaning the supplied SQL function name.
*/
if (strcmp(languageName, "internal") == 0)
@ -239,7 +239,7 @@ ProcedureCreate(char *procedureName,
prosrc = procedureName;
if (fmgr_lookupByName(prosrc) == (func_ptr) NULL)
elog(ERROR,
"ProcedureCreate: there is no builtin function named \"%s\"",
"ProcedureCreate: there is no builtin function named \"%s\"",
prosrc);
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.36 1999/04/20 03:51:14 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.37 1999/05/25 16:08:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -400,8 +400,8 @@ TypeCreate(char *typeName,
procname = procs[j];
/*
* First look for a 1-argument func with all argtypes 0.
* This is valid for all four kinds of procedure.
* First look for a 1-argument func with all argtypes 0. This is
* valid for all four kinds of procedure.
*/
MemSet(argList, 0, 8 * sizeof(Oid));
@ -413,20 +413,23 @@ TypeCreate(char *typeName,
if (!HeapTupleIsValid(tup))
{
/*
* For array types, the input procedures may take 3 args
* (data value, element OID, atttypmod); the pg_proc
* argtype signature is 0,0,INT4OID. The output procedures
* may take 2 args (data value, element OID).
* For array types, the input procedures may take 3 args (data
* value, element OID, atttypmod); the pg_proc argtype
* signature is 0,0,INT4OID. The output procedures may take 2
* args (data value, element OID).
*/
if (OidIsValid(elementObjectId))
{
int nargs;
int nargs;
if (j % 2)
{
/* output proc */
nargs = 2;
} else
}
else
{
/* input proc */
nargs = 3;


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.2 1999/03/16 04:25:46 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/recipe.c,v 1.3 1999/05/25 16:08:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -121,18 +121,18 @@ static QueryTreeList *tg_parseTeeNode(TgRecipe * r,
void
beginRecipe(RecipeStmt *stmt)
{
TgRecipe *r;
int i,
numTees;
TgRecipe *r;
int i,
numTees;
QueryTreeList *qList;
char portalName[1024];
char portalName[1024];
Plan *plan;
TupleDesc attinfo;
QueryDesc *queryDesc;
Query *parsetree;
Plan *plan;
TupleDesc attinfo;
QueryDesc *queryDesc;
Query *parsetree;
TeeInfo *teeInfo;
TeeInfo *teeInfo;
/*
* retrieveRecipe() reads the recipe from the database and returns a
@ -808,21 +808,21 @@ tg_parseTeeNode(TgRecipe * r,
static QueryTreeList *
tg_parseSubQuery(TgRecipe * r, TgNode * n, TeeInfo * teeInfo)
{
TgElement *elem;
char *funcName;
Oid typev[8], /* eight arguments maximum */
relid;
int i,
parameterCount;
TgElement *elem;
char *funcName;
Oid typev[8], /* eight arguments maximum */
relid;
int i,
parameterCount;
QueryTreeList *qList; /* the parse tree of the nodeElement */
QueryTreeList *inputQlist; /* the list of parse trees for the inputs
* to this node */
QueryTreeList *q;
TgNode *child;
Relation rel;
unsigned int len;
TupleDesc tupdesc;
TgNode *child;
Relation rel;
unsigned int len;
TupleDesc tupdesc;
qList = NULL;


@ -5,17 +5,17 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* The version stuff has not been tested under postgres95 and probably
* The version stuff has not been tested under postgres95 and probably
* doesn't work! - jolly 8/19/95
*
*
* $Id: version.c,v 1.18 1999/02/13 23:15:12 momjian Exp $
* $Id: version.c,v 1.19 1999/05/25 16:08:32 momjian Exp $
*
* NOTES
* At the point the version is defined, 2 physical relations are created
* <vname>_added and <vname>_deleted.
*
* In addition, 4 rules are defined which govern the semantics of
* In addition, 4 rules are defined which govern the semantics of
* versions w.r.t retrieves, appends, replaces and deletes.
*
*-------------------------------------------------------------------------


@ -6,7 +6,7 @@
* Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.46 1999/04/25 19:27:43 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.47 1999/05/25 16:08:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -31,17 +31,17 @@
* relname to a list of outstanding NOTIFY requests. Actual processing
* happens if and only if we reach transaction commit. At that time (in
* routine AtCommit_Notify) we scan pg_listener for matching relnames.
* If the listenerPID in a matching tuple is ours, we just send a notify
* If the listenerPID in a matching tuple is ours, we just send a notify
* message to our own front end. If it is not ours, and "notification"
* is not already nonzero, we set notification to our own PID and send a
* SIGUSR2 signal to the receiving process (indicated by listenerPID).
* BTW: if the signal operation fails, we presume that the listener backend
* crashed without removing this tuple, and remove the tuple for it.
* crashed without removing this tuple, and remove the tuple for it.
*
* 4. Upon receipt of a SIGUSR2 signal, the signal handler can call inbound-
* notify processing immediately if this backend is idle (ie, it is
* waiting for a frontend command and is not within a transaction block).
* Otherwise the handler may only set a flag, which will cause the
* Otherwise the handler may only set a flag, which will cause the
* processing to occur just before we next go idle.
*
* 5. Inbound-notify processing consists of scanning pg_listener for tuples
@ -53,7 +53,7 @@
*
* Note that the system's use of pg_listener is confined to very short
* intervals at the end of a transaction that contains NOTIFY statements,
* or during the transaction caused by an inbound SIGUSR2. So the fact that
* or during the transaction caused by an inbound SIGUSR2. So the fact that
* pg_listener is a global resource shouldn't cause too much performance
* problem. But application authors ought to be discouraged from doing
* LISTEN or UNLISTEN near the start of a long transaction --- that would
@ -109,8 +109,8 @@ extern CommandDest whereToSendOutput;
/*
* State for outbound notifies consists of a list of all relnames NOTIFYed
* in the current transaction. We do not actually perform a NOTIFY until
* and unless the transaction commits. pendingNotifies is NULL if no
* in the current transaction. We do not actually perform a NOTIFY until
* and unless the transaction commits. pendingNotifies is NULL if no
* NOTIFYs have been done in the current transaction.
*/
static Dllist *pendingNotifies = NULL;
@ -125,8 +125,8 @@ static Dllist *pendingNotifies = NULL;
* does not grok "volatile", you'd be best advised to compile this file
* with all optimization turned off.
*/
static volatile int notifyInterruptEnabled = 0;
static volatile int notifyInterruptOccurred = 0;
static volatile int notifyInterruptEnabled = 0;
static volatile int notifyInterruptOccurred = 0;
/* True if we've registered an on_shmem_exit cleanup (or at least tried to). */
static int unlistenExitRegistered = 0;
@ -142,7 +142,7 @@ static void ClearPendingNotifies(void);
/*
*--------------------------------------------------------------
* Async_Notify
* Async_Notify
*
* This is executed by the SQL notify command.
*
@ -164,28 +164,29 @@ Async_Notify(char *relname)
/*
* We allocate list memory from the global malloc pool to ensure that
* it will live until we want to use it. This is probably not necessary
* any longer, since we will use it before the end of the transaction.
* DLList only knows how to use malloc() anyway, but we could probably
* palloc() the strings...
* it will live until we want to use it. This is probably not
* necessary any longer, since we will use it before the end of the
* transaction. DLList only knows how to use malloc() anyway, but we
* could probably palloc() the strings...
*/
if (!pendingNotifies)
pendingNotifies = DLNewList();
notifyName = strdup(relname);
DLAddHead(pendingNotifies, DLNewElem(notifyName));
/*
* NOTE: we could check to see if pendingNotifies already has an entry
* for relname, and thus avoid making duplicate entries. However, most
* apps probably don't notify the same name multiple times per transaction,
* so we'd likely just be wasting cycles to make such a check.
* AsyncExistsPendingNotify() doesn't really care whether the list
* contains duplicates...
* for relname, and thus avoid making duplicate entries. However,
* most apps probably don't notify the same name multiple times per
* transaction, so we'd likely just be wasting cycles to make such a
* check. AsyncExistsPendingNotify() doesn't really care whether the
* list contains duplicates...
*/
}
/*
*--------------------------------------------------------------
* Async_Listen
* Async_Listen
*
* This is executed by the SQL listen command.
*
@ -274,7 +275,7 @@ Async_Listen(char *relname, int pid)
/*
* now that we are listening, make sure we will unlisten before dying.
*/
if (! unlistenExitRegistered)
if (!unlistenExitRegistered)
{
if (on_shmem_exit(Async_UnlistenOnExit, (caddr_t) NULL) < 0)
elog(NOTICE, "Async_Listen: out of shmem_exit slots");
@ -284,7 +285,7 @@ Async_Listen(char *relname, int pid)
/*
*--------------------------------------------------------------
* Async_Unlisten
* Async_Unlisten
*
* This is executed by the SQL unlisten command.
*
@ -326,14 +327,16 @@ Async_Unlisten(char *relname, int pid)
UnlockRelation(lRel, AccessExclusiveLock);
heap_close(lRel);
}
/* We do not complain about unlistening something not being listened;
/*
* We do not complain about unlistening something not being listened;
* should we?
*/
}
/*
*--------------------------------------------------------------
* Async_UnlistenAll
* Async_UnlistenAll
*
* Unlisten all relations for this backend.
*
@ -379,7 +382,7 @@ Async_UnlistenAll()
/*
*--------------------------------------------------------------
* Async_UnlistenOnExit
* Async_UnlistenOnExit
*
* Clean up the pg_listener table at backend exit.
*
@ -398,11 +401,12 @@ Async_UnlistenAll()
static void
Async_UnlistenOnExit()
{
/*
* We need to start/commit a transaction for the unlisten,
* but if there is already an active transaction we had better
* abort that one first. Otherwise we'd end up committing changes
* that probably ought to be discarded.
* We need to start/commit a transaction for the unlisten, but if
* there is already an active transaction we had better abort that one
* first. Otherwise we'd end up committing changes that probably
* ought to be discarded.
*/
AbortOutOfAnyTransaction();
/* Now we can do the unlisten */
@ -413,7 +417,7 @@ Async_UnlistenOnExit()
/*
*--------------------------------------------------------------
* AtCommit_Notify
* AtCommit_Notify
*
* This is called at transaction commit.
*
@ -450,12 +454,14 @@ AtCommit_Notify()
int32 listenerPID;
if (!pendingNotifies)
return; /* no NOTIFY statements in this transaction */
return; /* no NOTIFY statements in this
* transaction */
/* NOTIFY is disabled if not normal processing mode.
* This test used to be in xact.c, but it seems cleaner to do it here.
/*
* NOTIFY is disabled if not normal processing mode. This test used to
* be in xact.c, but it seems cleaner to do it here.
*/
if (! IsNormalProcessingMode())
if (!IsNormalProcessingMode())
{
ClearPendingNotifies();
return;
@ -487,10 +493,13 @@ AtCommit_Notify()
if (listenerPID == MyProcPid)
{
/* Self-notify: no need to bother with table update.
/*
* Self-notify: no need to bother with table update.
* Indeed, we *must not* clear the notification field in
* this path, or we could lose an outside notify, which'd be
* bad for applications that ignore self-notify messages.
* this path, or we could lose an outside notify, which'd
* be bad for applications that ignore self-notify
* messages.
*/
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying self");
NotifyMyFrontEnd(relname, listenerPID);
@ -499,23 +508,27 @@ AtCommit_Notify()
{
TPRINTF(TRACE_NOTIFY, "AtCommit_Notify: notifying pid %d",
listenerPID);
/*
* If someone has already notified this listener,
* we don't bother modifying the table, but we do still send
* a SIGUSR2 signal, just in case that backend missed the
* earlier signal for some reason. It's OK to send the signal
* first, because the other guy can't read pg_listener until
* we unlock it.
* If someone has already notified this listener, we don't
* bother modifying the table, but we do still send a
* SIGUSR2 signal, just in case that backend missed the
* earlier signal for some reason. It's OK to send the
* signal first, because the other guy can't read
* pg_listener until we unlock it.
*/
#ifdef HAVE_KILL
if (kill(listenerPID, SIGUSR2) < 0)
{
/* Get rid of pg_listener entry if it refers to a PID
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend
* crashed without deleting its pg_listener entries.
* This code used to only delete the entry if errno==ESRCH,
* but as far as I can see we should just do it for any
* failure (certainly at least for EPERM too...)
* This code used to only delete the entry if
* errno==ESRCH, but as far as I can see we should
* just do it for any failure (certainly at least for
* EPERM too...)
*/
heap_delete(lRel, &lTuple->t_self, NULL);
}
@ -536,6 +549,7 @@ AtCommit_Notify()
}
heap_endscan(sRel);
/*
* We do not do RelationUnsetLockForWrite(lRel) here, because the
* transaction is about to be committed anyway.
@ -549,7 +563,7 @@ AtCommit_Notify()
/*
*--------------------------------------------------------------
* AtAbort_Notify
* AtAbort_Notify
*
* This is called at transaction abort.
*
@ -569,7 +583,7 @@ AtAbort_Notify()
/*
*--------------------------------------------------------------
* Async_NotifyHandler
* Async_NotifyHandler
*
* This is the signal handler for SIGUSR2.
*
@ -588,25 +602,30 @@ AtAbort_Notify()
void
Async_NotifyHandler(SIGNAL_ARGS)
{
/*
* Note: this is a SIGNAL HANDLER. You must be very wary what you do here.
* Some helpful soul had this routine sprinkled with TPRINTFs, which would
* likely lead to corruption of stdio buffers if they were ever turned on.
* Note: this is a SIGNAL HANDLER. You must be very wary what you do
* here. Some helpful soul had this routine sprinkled with TPRINTFs,
* which would likely lead to corruption of stdio buffers if they were
* ever turned on.
*/
if (notifyInterruptEnabled)
{
/* I'm not sure whether some flavors of Unix might allow another
* SIGUSR2 occurrence to recursively interrupt this routine.
* To cope with the possibility, we do the same sort of dance that
* EnableNotifyInterrupt must do --- see that routine for comments.
/*
* I'm not sure whether some flavors of Unix might allow another
* SIGUSR2 occurrence to recursively interrupt this routine. To
* cope with the possibility, we do the same sort of dance that
* EnableNotifyInterrupt must do --- see that routine for
* comments.
*/
notifyInterruptEnabled = 0; /* disable any recursive signal */
notifyInterruptOccurred = 1; /* do at least one iteration */
for (;;)
{
notifyInterruptEnabled = 1;
if (! notifyInterruptOccurred)
if (!notifyInterruptOccurred)
break;
notifyInterruptEnabled = 0;
if (notifyInterruptOccurred)
@ -621,14 +640,18 @@ Async_NotifyHandler(SIGNAL_ARGS)
}
else
{
/* In this path it is NOT SAFE to do much of anything, except this: */
/*
* In this path it is NOT SAFE to do much of anything, except
* this:
*/
notifyInterruptOccurred = 1;
}
}
/*
* --------------------------------------------------------------
* EnableNotifyInterrupt
* EnableNotifyInterrupt
*
* This is called by the PostgresMain main loop just before waiting
* for a frontend command. If we are truly idle (ie, *not* inside
@ -652,26 +675,27 @@ EnableNotifyInterrupt(void)
* notifyInterruptOccurred and then set notifyInterruptEnabled, we
* could fail to respond promptly to a signal that happens in between
* those two steps. (A very small time window, perhaps, but Murphy's
* Law says you can hit it...) Instead, we first set the enable flag,
* then test the occurred flag. If we see an unserviced interrupt
* has occurred, we re-clear the enable flag before going off to do
* the service work. (That prevents re-entrant invocation of
* ProcessIncomingNotify() if another interrupt occurs.)
* If an interrupt comes in between the setting and clearing of
* notifyInterruptEnabled, then it will have done the service
* work and left notifyInterruptOccurred zero, so we have to check
* again after clearing enable. The whole thing has to be in a loop
* in case another interrupt occurs while we're servicing the first.
* Once we get out of the loop, enable is set and we know there is no
* Law says you can hit it...) Instead, we first set the enable flag,
* then test the occurred flag. If we see an unserviced interrupt has
* occurred, we re-clear the enable flag before going off to do the
* service work. (That prevents re-entrant invocation of
* ProcessIncomingNotify() if another interrupt occurs.) If an
* interrupt comes in between the setting and clearing of
* notifyInterruptEnabled, then it will have done the service work and
* left notifyInterruptOccurred zero, so we have to check again after
* clearing enable. The whole thing has to be in a loop in case
* another interrupt occurs while we're servicing the first. Once we
* get out of the loop, enable is set and we know there is no
* unserviced interrupt.
*
* NB: an overenthusiastic optimizing compiler could easily break this
* code. Hopefully, they all understand what "volatile" means these days.
* code. Hopefully, they all understand what "volatile" means these
* days.
*/
for (;;)
{
notifyInterruptEnabled = 1;
if (! notifyInterruptOccurred)
if (!notifyInterruptOccurred)
break;
notifyInterruptEnabled = 0;
if (notifyInterruptOccurred)
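Distilled from the comment above, a minimal sketch (not backend code) of the two-flag protocol: the signal handler only sets the "occurred" flag, while the main loop sets "enabled", re-checks "occurred", and clears "enabled" around the service work, looping until it can leave with the interrupt enabled and nothing pending. The flag names and the plain signal() registration are illustrative.

#include <signal.h>

static volatile sig_atomic_t interruptEnabled = 0;
static volatile sig_atomic_t interruptOccurred = 0;

static void
handler(int signo)
{
	(void) signo;
	interruptOccurred = 1;		/* only set a flag inside the handler */
}

static void
service_interrupt(void)
{
	interruptOccurred = 0;
	/* ... do the real notify processing here ... */
}

static void
enable_interrupts(void)
{
	for (;;)
	{
		interruptEnabled = 1;
		if (!interruptOccurred)
			break;				/* enabled, and nothing left to service */
		interruptEnabled = 0;	/* block re-entrant servicing */
		if (interruptOccurred)
			service_interrupt();
	}
}

int
main(void)
{
	signal(SIGUSR2, handler);	/* illustrative; the backend uses pqsignal */
	enable_interrupts();
	/* ... wait for frontend commands ... */
	return 0;
}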
@ -686,7 +710,7 @@ EnableNotifyInterrupt(void)
/*
* --------------------------------------------------------------
* DisableNotifyInterrupt
* DisableNotifyInterrupt
*
* This is called by the PostgresMain main loop just after receiving
* a frontend command. Signal handler execution of inbound notifies
@ -702,7 +726,7 @@ DisableNotifyInterrupt(void)
/*
* --------------------------------------------------------------
* ProcessIncomingNotify
* ProcessIncomingNotify
*
* Deal with arriving NOTIFYs from other backends.
* This is called either directly from the SIGUSR2 signal handler,
@ -777,6 +801,7 @@ ProcessIncomingNotify(void)
}
}
heap_endscan(sRel);
/*
* We do not do RelationUnsetLockForWrite(lRel) here, because the
* transaction is about to be committed anyway.
@ -785,7 +810,10 @@ ProcessIncomingNotify(void)
CommitTransactionCommand();
/* Must flush the notify messages to ensure frontend gets them promptly. */
/*
* Must flush the notify messages to ensure frontend gets them
* promptly.
*/
pq_flush();
PS_SET_STATUS("idle");
@ -800,20 +828,22 @@ NotifyMyFrontEnd(char *relname, int32 listenerPID)
if (whereToSendOutput == Remote)
{
StringInfoData buf;
pq_beginmessage(&buf);
pq_sendbyte(&buf, 'A');
pq_sendint(&buf, listenerPID, sizeof(int32));
pq_sendstring(&buf, relname);
pq_endmessage(&buf);
/* NOTE: we do not do pq_flush() here. For a self-notify, it will
/*
* NOTE: we do not do pq_flush() here. For a self-notify, it will
* happen at the end of the transaction, and for incoming notifies
* ProcessIncomingNotify will do it after finding all the notifies.
* ProcessIncomingNotify will do it after finding all the
* notifies.
*/
}
else
{
elog(NOTICE, "NOTIFY for %s", relname);
}
}
/* Does pendingNotifies include the given relname?
@ -847,10 +877,12 @@ ClearPendingNotifies()
if (pendingNotifies)
{
/* Since the referenced strings are malloc'd, we have to scan the
/*
* Since the referenced strings are malloc'd, we have to scan the
* list and delete them individually. If we used palloc for the
* strings then we could just do DLFreeList to get rid of both
* the list nodes and the list base...
* strings then we could just do DLFreeList to get rid of both the
* list nodes and the list base...
*/
while ((p = DLRemHead(pendingNotifies)) != NULL)
{


@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.38 1999/02/13 23:15:02 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.39 1999/05/25 16:08:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -236,17 +236,17 @@ copy_heap(Oid OIDOldHeap)
static void
copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
{
Relation OldIndex,
NewHeap;
HeapTuple Old_pg_index_Tuple,
Old_pg_index_relation_Tuple,
pg_proc_Tuple;
Relation OldIndex,
NewHeap;
HeapTuple Old_pg_index_Tuple,
Old_pg_index_relation_Tuple,
pg_proc_Tuple;
Form_pg_index Old_pg_index_Form;
Form_pg_class Old_pg_index_relation_Form;
Form_pg_proc pg_proc_Form;
char *NewIndexName;
AttrNumber *attnumP;
int natts;
Form_pg_proc pg_proc_Form;
char *NewIndexName;
AttrNumber *attnumP;
int natts;
FuncIndexInfo *finfo;
NewHeap = heap_open(OIDNewHeap);
@ -259,14 +259,14 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
*/
Old_pg_index_Tuple = SearchSysCacheTuple(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(OldIndex)),
0, 0, 0);
0, 0, 0);
Assert(Old_pg_index_Tuple);
Old_pg_index_Form = (Form_pg_index) GETSTRUCT(Old_pg_index_Tuple);
Old_pg_index_relation_Tuple = SearchSysCacheTuple(RELOID,
ObjectIdGetDatum(RelationGetRelid(OldIndex)),
0, 0, 0);
0, 0, 0);
Assert(Old_pg_index_relation_Tuple);
Old_pg_index_relation_Form = (Form_pg_class) GETSTRUCT(Old_pg_index_relation_Tuple);
@ -296,7 +296,7 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
pg_proc_Tuple = SearchSysCacheTuple(PROOID,
ObjectIdGetDatum(Old_pg_index_Form->indproc),
0, 0, 0);
0, 0, 0);
Assert(pg_proc_Tuple);
pg_proc_Form = (Form_pg_proc) GETSTRUCT(pg_proc_Tuple);
@ -319,7 +319,7 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
(uint16) 0, (Datum) NULL, NULL,
Old_pg_index_Form->indislossy,
Old_pg_index_Form->indisunique,
Old_pg_index_Form->indisprimary);
Old_pg_index_Form->indisprimary);
heap_close(OldIndex);
heap_close(NewHeap);
@ -329,14 +329,14 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap)
static void
rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
{
Relation LocalNewHeap,
LocalOldHeap,
LocalOldIndex;
IndexScanDesc ScanDesc;
RetrieveIndexResult ScanResult;
HeapTupleData LocalHeapTuple;
Buffer LocalBuffer;
Oid OIDNewHeapInsert;
Relation LocalNewHeap,
LocalOldHeap,
LocalOldIndex;
IndexScanDesc ScanDesc;
RetrieveIndexResult ScanResult;
HeapTupleData LocalHeapTuple;
Buffer LocalBuffer;
Oid OIDNewHeapInsert;
/*
* Open the relations I need. Scan through the OldHeap on the OldIndex


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.44 1999/05/10 00:44:56 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.45 1999/05/25 16:08:17 momjian Exp $
*
* NOTES
* The PortalExecutorHeapMemory crap needs to be eliminated
@ -117,18 +117,18 @@ PerformPortalFetch(char *name,
}
/* ----------------
* Create a const node from the given count value
* Create a const node from the given count value
* ----------------
*/
memset(&limcount, 0, sizeof(limcount));
limcount.type = T_Const;
limcount.consttype = INT4OID;
limcount.constlen = sizeof(int4);
limcount.constvalue = (Datum)count;
limcount.constisnull = FALSE;
limcount.type = T_Const;
limcount.consttype = INT4OID;
limcount.constlen = sizeof(int4);
limcount.constvalue = (Datum) count;
limcount.constisnull = FALSE;
limcount.constbyval = TRUE;
limcount.constisset = FALSE;
limcount.constiscast = FALSE;
limcount.constiscast = FALSE;
/* ----------------
@ -193,8 +193,8 @@ PerformPortalFetch(char *name,
*/
PortalExecutorHeapMemory = (MemoryContext) PortalGetHeapMemory(portal);
ExecutorRun(queryDesc, PortalGetState(portal), feature,
(Node *)NULL, (Node *)&limcount);
ExecutorRun(queryDesc, PortalGetState(portal), feature,
(Node *) NULL, (Node *) &limcount);
if (dest == None) /* MOVE */
pfree(queryDesc);
@ -211,7 +211,7 @@ PerformPortalFetch(char *name,
* ----------------
*/
MemoryContextSwitchTo(
(MemoryContext) PortalGetHeapMemory(GetPortalByName(NULL)));
(MemoryContext) PortalGetHeapMemory(GetPortalByName(NULL)));
}
/* --------------------------------
@ -503,7 +503,7 @@ PerformAddAttribute(char *relationName,
heap_replace(rel, &reltup->t_self, reltup, NULL);
{
HeapTuple temptup;
HeapTuple temptup;
if ((temptup = get_temp_rel_by_name(relationName)) != NULL)
((Form_pg_class) GETSTRUCT(temptup))->relnatts = maxatts;
@ -519,7 +519,7 @@ PerformAddAttribute(char *relationName,
}
void
LockTableCommand(LockStmt *lockstmt)
LockTableCommand(LockStmt * lockstmt)
{
Relation rel;
int aclresult;


@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.76 1999/05/10 00:44:58 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.77 1999/05/25 16:08:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -70,111 +70,138 @@ static int CountTuples(Relation relation);
static int lineno;
/*
/*
* Internal communications functions
*/
inline void CopySendData(void *databuf, int datasize, FILE *fp);
inline void CopySendString(char *str, FILE *fp);
inline void CopySendChar(char c, FILE *fp);
inline void CopyGetData(void *databuf, int datasize, FILE *fp);
inline int CopyGetChar(FILE *fp);
inline int CopyGetEof(FILE *fp);
inline int CopyPeekChar(FILE *fp);
inline int CopyGetChar(FILE *fp);
inline int CopyGetEof(FILE *fp);
inline int CopyPeekChar(FILE *fp);
inline void CopyDonePeek(FILE *fp, int c, int pickup);
/*
* CopySendData sends output data either to the file
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
*
* CopySendString does the same for null-terminated strings
* CopySendChar does the same for single characters
*
* NB: no data conversion is applied by these functions
*/
inline void CopySendData(void *databuf, int datasize, FILE *fp) {
if (!fp)
pq_putbytes((char*) databuf, datasize);
else
fwrite(databuf, datasize, 1, fp);
}
inline void CopySendString(char *str, FILE *fp) {
CopySendData(str,strlen(str),fp);
inline void
CopySendData(void *databuf, int datasize, FILE *fp)
{
if (!fp)
pq_putbytes((char *) databuf, datasize);
else
fwrite(databuf, datasize, 1, fp);
}
inline void CopySendChar(char c, FILE *fp) {
CopySendData(&c,1,fp);
inline void
CopySendString(char *str, FILE *fp)
{
CopySendData(str, strlen(str), fp);
}
inline void
CopySendChar(char c, FILE *fp)
{
CopySendData(&c, 1, fp);
}
/*
* CopyGetData reads output data either from the file
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
* specified by fp or, if fp is NULL, using the standard
* backend->frontend functions
*
* CopyGetChar does the same for single characters
* CopyGetEof checks if it's EOF on the input
*
* NB: no data conversion is applied by these functions
*/
inline void CopyGetData(void *databuf, int datasize, FILE *fp) {
if (!fp)
pq_getbytes((char*) databuf, datasize);
else
fread(databuf, datasize, 1, fp);
inline void
CopyGetData(void *databuf, int datasize, FILE *fp)
{
if (!fp)
pq_getbytes((char *) databuf, datasize);
else
fread(databuf, datasize, 1, fp);
}
inline int CopyGetChar(FILE *fp) {
if (!fp)
{
unsigned char ch;
if (pq_getbytes((char*) &ch, 1))
return EOF;
return ch;
}
else
return getc(fp);
inline int
CopyGetChar(FILE *fp)
{
if (!fp)
{
unsigned char ch;
if (pq_getbytes((char *) &ch, 1))
return EOF;
return ch;
}
else
return getc(fp);
}
inline int CopyGetEof(FILE *fp) {
if (!fp)
return 0; /* Never return EOF when talking to frontend ? */
else
return feof(fp);
inline int
CopyGetEof(FILE *fp)
{
if (!fp)
return 0; /* Never return EOF when talking to
* frontend ? */
else
return feof(fp);
}
/*
* CopyPeekChar reads a byte in "peekable" mode.
* after each call to CopyPeekChar, a call to CopyDonePeek _must_
* follow.
 * CopyDonePeek will either take the peeked char off the stream
 * CopyDonePeek will either take the peeked char off the stream
* (if pickup is != 0) or leave it on the stream (if pickup == 0)
*/
inline int CopyPeekChar(FILE *fp) {
if (!fp)
return pq_peekbyte();
else
return getc(fp);
inline int
CopyPeekChar(FILE *fp)
{
if (!fp)
return pq_peekbyte();
else
return getc(fp);
}
inline void CopyDonePeek(FILE *fp, int c, int pickup) {
if (!fp) {
if (pickup) {
/* We want to pick it up - just receive again into dummy buffer */
char c;
pq_getbytes(&c, 1);
}
/* If we didn't want to pick it up, just leave it where it sits */
}
else {
if (!pickup) {
/* We don't want to pick it up - so put it back in there */
ungetc(c,fp);
}
/* If we wanted to pick it up, it's already there */
}
inline void
CopyDonePeek(FILE *fp, int c, int pickup)
{
if (!fp)
{
if (pickup)
{
/*
* We want to pick it up - just receive again into dummy
* buffer
*/
char c;
pq_getbytes(&c, 1);
}
/* If we didn't want to pick it up, just leave it where it sits */
}
else
{
if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c, fp);
}
/* If we wanted to pick it up, it's already there */
}
}
/*
@ -317,7 +344,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
else if (!from)
{
if (!binary)
CopySendData("\\.\n",3,fp);
CopySendData("\\.\n", 3, fp);
if (IsUnderPostmaster)
pq_endcopyout(false);
}
@ -395,8 +422,8 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
if (oids && !binary)
{
CopySendString(oidout(tuple->t_data->t_oid),fp);
CopySendChar(delim[0],fp);
CopySendString(oidout(tuple->t_data->t_oid), fp);
CopySendChar(delim[0], fp);
}
for (i = 0; i < attr_count; i++)
@ -466,8 +493,8 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
}
}
CopySendData((char *) tuple->t_data + tuple->t_data->t_hoff,
length, fp);
CopySendData((char *) tuple->t_data + tuple->t_data->t_hoff,
length, fp);
}
}
@ -521,7 +548,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
Node **indexPred = NULL;
TupleDesc rtupdesc;
ExprContext *econtext = NULL;
EState *estate = makeNode(EState); /* for ExecConstraints() */
EState *estate = makeNode(EState); /* for ExecConstraints() */
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
@ -566,11 +593,11 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
itupdescArr[i] = RelationGetDescr(index_rels[i]);
pgIndexTup = SearchSysCacheTuple(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(index_rels[i])),
0, 0, 0);
0, 0, 0);
Assert(pgIndexTup);
pgIndexP[i] = (Form_pg_index) GETSTRUCT(pgIndexTup);
for (attnumP = &(pgIndexP[i]->indkey[0]), natts = 0;
natts < INDEX_MAX_KEYS && *attnumP != InvalidAttrNumber;
natts < INDEX_MAX_KEYS && *attnumP != InvalidAttrNumber;
attnumP++, natts++);
if (pgIndexP[i]->indproc != InvalidOid)
{
@ -777,7 +804,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
}
else if (nulls[i] != 'n')
{
ptr = (char *)att_align(ptr, attr[i]->attlen, attr[i]->attalign);
ptr = (char *) att_align(ptr, attr[i]->attlen, attr[i]->attalign);
values[i] = (Datum) ptr;
ptr = att_addlength(ptr, attr[i]->attlen, ptr);
}
@ -888,7 +915,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
pfree(index_nulls);
pfree(idatum);
pfree(byval);
if (!binary)
{
pfree(in_functions);
@ -903,7 +930,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp, char *delim)
{
if (index_rels[i] == NULL)
continue;
if ((index_rels[i])->rd_rel->relam != BTREE_AM_OID &&
if ((index_rels[i])->rd_rel->relam != BTREE_AM_OID &&
(index_rels[i])->rd_rel->relam != HASH_AM_OID)
UnlockRelation(index_rels[i], AccessExclusiveLock);
index_close(index_rels[i]);
@ -1022,12 +1049,12 @@ GetIndexRelations(Oid main_relation_oid,
{
index_relation_oid = (Oid) DatumGetInt32(heap_getattr(tuple, 2,
tupDesc, &isnull));
tupDesc, &isnull));
if (index_relation_oid == main_relation_oid)
{
scan->index_rel_oid = (Oid) DatumGetInt32(heap_getattr(tuple,
Anum_pg_index_indexrelid,
tupDesc, &isnull));
Anum_pg_index_indexrelid,
tupDesc, &isnull));
(*n_indices)++;
scan->next = (RelationList *) palloc(sizeof(RelationList));
scan = scan->next;
@ -1047,7 +1074,7 @@ GetIndexRelations(Oid main_relation_oid,
{
(*index_rels)[i] = index_open(scan->index_rel_oid);
/* comments in execUtils.c */
if ((*index_rels)[i] != NULL &&
if ((*index_rels)[i] != NULL &&
((*index_rels)[i])->rd_rel->relam != BTREE_AM_OID &&
((*index_rels)[i])->rd_rel->relam != HASH_AM_OID)
LockRelation((*index_rels)[i], AccessExclusiveLock);
@ -1176,26 +1203,29 @@ CopyReadAttribute(FILE *fp, bool *isnull, char *delim)
if (ISOCTAL(c))
{
val = (val << 3) + VALUE(c);
CopyDonePeek(fp, c, 1); /* Pick up the character! */
CopyDonePeek(fp, c, 1); /* Pick up the
* character! */
c = CopyPeekChar(fp);
if (ISOCTAL(c)) {
CopyDonePeek(fp,c,1); /* pick up! */
if (ISOCTAL(c))
{
CopyDonePeek(fp, c, 1); /* pick up! */
val = (val << 3) + VALUE(c);
}
else
{
if (CopyGetEof(fp)) {
CopyDonePeek(fp,c,1); /* pick up */
if (CopyGetEof(fp))
{
CopyDonePeek(fp, c, 1); /* pick up */
return NULL;
}
CopyDonePeek(fp,c,0); /* Return to stream! */
CopyDonePeek(fp, c, 0); /* Return to stream! */
}
}
else
{
if (CopyGetEof(fp))
return NULL;
CopyDonePeek(fp,c,0); /* Return to stream! */
CopyDonePeek(fp, c, 0); /* Return to stream! */
}
c = val & 0377;
}
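The CopyPeekChar/CopyDonePeek pairing exercised in the hunk above follows a simple contract: every peek must be answered by exactly one done-peek, which either consumes the byte (pickup != 0) or leaves it on the stream (pickup == 0). A minimal sketch, mirroring only the FILE-backed branch with plain getc/ungetc; the function names are illustrative.

#include <stdio.h>

static int
peek_char(FILE *fp)
{
	return getc(fp);			/* byte is off the stream until done_peek decides */
}

static void
done_peek(FILE *fp, int c, int pickup)
{
	if (!pickup && c != EOF)
		ungetc(c, fp);			/* not wanted: push it back */
	/* if pickup, getc() has already consumed it */
}

int
main(void)
{
	int			c = peek_char(stdin);

	if (c >= '0' && c <= '7')
		done_peek(stdin, c, 1);	/* octal digit: keep it */
	else
		done_peek(stdin, c, 0);	/* not ours: return it to the stream */
	return 0;
}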


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.40 1999/02/13 23:15:05 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.41 1999/05/25 16:08:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -39,7 +39,7 @@ static List *MergeAttributes(List *schema, List *supers, List **supconstr);
static void StoreCatalogInheritance(Oid relationId, List *supers);
/* ----------------------------------------------------------------
* DefineRelation
* DefineRelation
* Creates a new relation.
* ----------------------------------------------------------------
*/
@ -90,10 +90,10 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (constraints != NIL)
{
List *entry;
int nconstr = length(constraints),
ncheck = 0,
i;
List *entry;
int nconstr = length(constraints),
ncheck = 0,
i;
ConstrCheck *check = (ConstrCheck *) palloc(nconstr * sizeof(ConstrCheck));
foreach(entry, constraints)
@ -107,9 +107,9 @@ DefineRelation(CreateStmt *stmt, char relkind)
for (i = 0; i < ncheck; i++)
{
if (strcmp(check[i].ccname, cdef->name) == 0)
elog(ERROR,
"DefineRelation: name (%s) of CHECK constraint duplicated",
cdef->name);
elog(ERROR,
"DefineRelation: name (%s) of CHECK constraint duplicated",
cdef->name);
}
check[ncheck].ccname = cdef->name;
}
@ -145,7 +145,7 @@ DefineRelation(CreateStmt *stmt, char relkind)
}
/*
* RemoveRelation
* RemoveRelation
* Deletes a new relation.
*
* Exceptions:
@ -164,7 +164,7 @@ RemoveRelation(char *name)
/*
* MergeAttributes
* MergeAttributes
* Returns new schema given initial schema and supers.
*
*
@ -276,8 +276,8 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
*/
attributeName = (attribute->attname).data;
tuple = SearchSysCacheTuple(TYPOID,
ObjectIdGetDatum(attribute->atttypid),
0, 0, 0);
ObjectIdGetDatum(attribute->atttypid),
0, 0, 0);
Assert(HeapTupleIsValid(tuple));
attributeType = (((Form_pg_type) GETSTRUCT(tuple))->typname).data;
@ -365,7 +365,7 @@ MergeAttributes(List *schema, List *supers, List **supconstr)
}
/*
* StoreCatalogInheritance
* StoreCatalogInheritance
* Updates the system catalogs with proper inheritance information.
*/
static void
@ -411,9 +411,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
*/
idList = lappendi(idList, tuple->t_data->t_oid);
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
datum[1] = ObjectIdGetDatum(tuple->t_data->t_oid); /* inhparent */
datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
datum[1] = ObjectIdGetDatum(tuple->t_data->t_oid); /* inhparent */
datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
nullarr[0] = ' ';
nullarr[1] = ' ';
@ -467,8 +467,8 @@ StoreCatalogInheritance(Oid relationId, List *supers)
break;
lnext(current) = lconsi(((Form_pg_inherits)
GETSTRUCT(tuple))->inhparent,
NIL);
GETSTRUCT(tuple))->inhparent,
NIL);
current = lnext(current);
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.34 1999/05/10 00:44:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.35 1999/05/25 16:08:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -47,8 +47,8 @@ createdb(char *dbname, char *dbpath, int encoding, CommandDest dest)
Oid db_id;
int4 user_id;
char buf[512];
char *lp,
loc[512];
char *lp,
loc[512];
/*
* If this call returns, the database does not exist and we're allowed
@ -80,13 +80,13 @@ createdb(char *dbname, char *dbpath, int encoding, CommandDest dest)
elog(ERROR, "Unable to create database directory '%s'", lp);
snprintf(buf, 512, "%s %s%cbase%ctemplate1%c* %s",
COPY_CMD, DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, lp);
COPY_CMD, DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, lp);
system(buf);
snprintf(buf, 512,
"insert into pg_database (datname, datdba, encoding, datpath)"
" values ('%s', '%d', '%d', '%s');", dbname, user_id, encoding,
loc);
snprintf(buf, 512,
"insert into pg_database (datname, datdba, encoding, datpath)"
" values ('%s', '%d', '%d', '%s');", dbname, user_id, encoding,
loc);
pg_exec_query_dest(buf, dest, false);
}
@ -96,9 +96,9 @@ destroydb(char *dbname, CommandDest dest)
{
int4 user_id;
Oid db_id;
char *path,
dbpath[MAXPGPATH + 1],
buf[512];
char *path,
dbpath[MAXPGPATH + 1],
buf[512];
/*
* If this call returns, the database exists and we're allowed to
@ -122,9 +122,9 @@ destroydb(char *dbname, CommandDest dest)
* remove the pg_database tuple FIRST, this may fail due to
* permissions problems
*/
snprintf(buf, 512,
"delete from pg_database where pg_database.oid = \'%u\'::oid", db_id);
pg_exec_query_dest(buf ,dest, false);
snprintf(buf, 512,
"delete from pg_database where pg_database.oid = \'%u\'::oid", db_id);
pg_exec_query_dest(buf, dest, false);
/* drop pages for this database that are in the shared buffer cache */
DropBuffers(db_id);
@ -294,13 +294,13 @@ static void
stop_vacuum(char *dbpath, char *dbname)
{
char filename[256];
FILE *fp;
FILE *fp;
int pid;
if (strchr(dbpath, SEP_CHAR) != 0)
{
snprintf(filename, 256, "%s%cbase%c%s%c%s.vacuum",
DataDir, SEP_CHAR, SEP_CHAR, dbname, SEP_CHAR, dbname);
snprintf(filename, 256, "%s%cbase%c%s%c%s.vacuum",
DataDir, SEP_CHAR, SEP_CHAR, dbname, SEP_CHAR, dbname);
}
else
snprintf(filename, 256, "%s%c%s.vacuum", dbpath, SEP_CHAR, dbname);


@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.28 1999/04/09 22:35:41 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.29 1999/05/25 16:08:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@ -203,7 +203,7 @@ interpret_AS_clause(const char *languageName, const char *as,
/*
* CreateFunction
* CreateFunction
* Execute a CREATE FUNCTION utility statement.
*
*/
@ -574,7 +574,7 @@ DefineAggregate(char *aggName, List *parameters)
}
/*
* DefineType
* DefineType
* Registers a new type.
*
*/


@ -4,7 +4,7 @@
*
* Copyright (c) 1994-5, Regents of the University of California
*
* $Id: explain.c,v 1.36 1999/05/09 23:31:45 tgl Exp $
* $Id: explain.c,v 1.37 1999/05/25 16:08:23 momjian Exp $
*
*/
#include <stdio.h>
@ -34,7 +34,7 @@ typedef struct ExplainState
} ExplainState;
static char *Explain_PlanToString(Plan *plan, ExplainState *es);
static void printLongNotice(const char * header, const char * message);
static void printLongNotice(const char *header, const char *message);
static void ExplainOneQuery(Query *query, bool verbose, CommandDest dest);
@ -46,8 +46,8 @@ static void ExplainOneQuery(Query *query, bool verbose, CommandDest dest);
void
ExplainQuery(Query *query, bool verbose, CommandDest dest)
{
List *rewritten;
List *l;
List *rewritten;
List *l;
/* rewriter and planner may not work in aborted state? */
if (IsAbortedTransactionBlockState())
@ -145,10 +145,10 @@ ExplainOneQuery(Query *query, bool verbose, CommandDest dest)
static void
explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
{
List *l;
List *l;
Relation relation;
char *pname;
int i;
char *pname;
int i;
if (plan == NULL)
{
@ -208,15 +208,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
case T_IndexScan:
appendStringInfo(str, " using ");
i = 0;
foreach (l, ((IndexScan *) plan)->indxid)
foreach(l, ((IndexScan *) plan)->indxid)
{
relation = RelationIdCacheGetRelation((int) lfirst(l));
if (++i > 1)
{
appendStringInfo(str, ", ");
}
appendStringInfo(str,
stringStringInfo((RelationGetRelationName(relation))->data));
appendStringInfo(str,
stringStringInfo((RelationGetRelationName(relation))->data));
}
case T_SeqScan:
if (((Scan *) plan)->scanrelid > 0)
@ -227,7 +225,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (strcmp(rte->refname, rte->relname) != 0)
{
appendStringInfo(str, "%s ",
stringStringInfo(rte->relname));
stringStringInfo(rte->relname));
}
appendStringInfo(str, stringStringInfo(rte->refname));
}
@ -238,7 +236,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (es->printCost)
{
appendStringInfo(str, " (cost=%.2f rows=%d width=%d)",
plan->cost, plan->plan_size, plan->plan_width);
plan->cost, plan->plan_size, plan->plan_width);
}
appendStringInfo(str, "\n");
@ -248,18 +246,14 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
List *saved_rtable = es->rtable;
List *lst;
for (i = 0; i < indent; i++)
{
for (i = 0; i < indent; i++)
appendStringInfo(str, " ");
}
appendStringInfo(str, " InitPlan\n");
foreach(lst, plan->initPlan)
{
es->rtable = ((SubPlan *) lfirst(lst))->rtable;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 2, es);
}
@ -270,9 +264,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (outerPlan(plan))
{
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, outerPlan(plan), indent + 3, es);
}
@ -281,9 +273,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
if (innerPlan(plan))
{
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, innerPlan(plan), indent + 3, es);
}
@ -295,17 +285,13 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
List *lst;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " SubPlan\n");
foreach(lst, plan->subPlan)
{
es->rtable = ((SubPlan *) lfirst(lst))->rtable;
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, ((SubPlan *) lfirst(lst))->plan, indent + 4, es);
}
@ -336,9 +322,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
es->rtable = nth(whichplan, appendplan->unionrtables);
for (i = 0; i < indent; i++)
{
appendStringInfo(str, " ");
}
appendStringInfo(str, " -> ");
explain_outNode(str, subnode, indent + 4, es);
@ -353,7 +337,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
static char *
Explain_PlanToString(Plan *plan, ExplainState *es)
{
StringInfoData str;
StringInfoData str;
/* see stringinfo.h for an explanation of this maneuver */
initStringInfo(&str);
@ -367,9 +351,9 @@ Explain_PlanToString(Plan *plan, ExplainState *es)
* This is a crock ... there shouldn't be an upper limit to what you can elog().
*/
static void
printLongNotice(const char * header, const char * message)
printLongNotice(const char *header, const char *message)
{
int len = strlen(message);
int len = strlen(message);
elog(NOTICE, "%.20s%.*s", header, ELOG_MAXLEN - 64, message);
len -= ELOG_MAXLEN - 64;
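
The printLongNotice() hunk above works around elog()'s fixed message length: it emits the first ELOG_MAXLEN - 64 bytes, subtracts that from the remaining length, and presumably loops over the rest (the remainder of the function lies outside this excerpt). A minimal standalone sketch of that chunking idea follows; printf() stands in for elog(), and CHUNK and emit_long_notice are invented names for the illustration, not backend code.

#include <stdio.h>
#include <string.h>

#define CHUNK 64                        /* stand-in for ELOG_MAXLEN - 64 */

/* Print the header once, then the message in CHUNK-sized pieces. */
static void
emit_long_notice(const char *header, const char *message)
{
    int     len = (int) strlen(message);

    printf("NOTICE:  %.20s%.*s\n", header, CHUNK, message);
    len -= CHUNK;
    while (len > 0)
    {
        message += CHUNK;
        printf("NOTICE:  %.*s\n", CHUNK, message);
        len -= CHUNK;
    }
}

int
main(void)
{
    char    long_msg[300];

    memset(long_msg, 'x', sizeof(long_msg) - 1);
    long_msg[sizeof(long_msg) - 1] = '\0';
    emit_long_notice("query: ", long_msg);
    return 0;
}
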

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.3 1999/05/10 00:44:59 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.4 1999/05/25 16:08:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -53,7 +53,7 @@ static void NormIndexAttrs(List *attList, AttrNumber *attNumP,
static char *GetDefaultOpClass(Oid atttypid);
/*
* DefineIndex
* DefineIndex
* Creates a new index.
*
* 'attributeList' is a list of IndexElem specifying either a functional
@ -164,7 +164,7 @@ DefineIndex(char *heapRelationName,
if (nargs > INDEX_MAX_KEYS)
{
elog(ERROR,
"Too many args to function, limit of %d", INDEX_MAX_KEYS);
"Too many args to function, limit of %d", INDEX_MAX_KEYS);
}
FIsetnArgs(&fInfo, nargs);
@ -207,7 +207,7 @@ DefineIndex(char *heapRelationName,
/*
* ExtendIndex
* ExtendIndex
* Extends a partial index.
*
* Exceptions:
@ -304,7 +304,7 @@ ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
predInfo->oldPred = oldPred;
attributeNumberA = (AttrNumber *) palloc(numberOfAttributes *
sizeof attributeNumberA[0]);
sizeof attributeNumberA[0]);
classObjectId = (Oid *) palloc(numberOfAttributes * sizeof classObjectId[0]);
@ -501,7 +501,7 @@ NormIndexAttrs(List *attList, /* list of IndexElem's */
/* we just set the type name because that is all we need */
attribute->typename = makeNode(TypeName);
attribute->typename->name = nameout(&((Form_pg_type) GETSTRUCT(tuple))->typname);
/* we all need the typmod for the char and varchar types. */
attribute->typename->typmod = attform->atttypmod;
}
@ -547,7 +547,7 @@ GetDefaultOpClass(Oid atttypid)
}
/*
* RemoveIndex
* RemoveIndex
* Deletes an index.
*
* Exceptions:

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.32 1999/02/13 23:15:08 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.33 1999/05/25 16:08:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -36,7 +36,7 @@
#endif
/*
* RemoveOperator
* RemoveOperator
* Deletes an operator.
*
* Exceptions:
@ -288,7 +288,7 @@ RemoveType(char *typeName) /* type name to be removed */
}
/*
* RemoveFunction
* RemoveFunction
* Deletes a function.
*
* Exceptions:

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.24 1999/05/17 18:24:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.25 1999/05/25 16:08:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -142,7 +142,7 @@ renameatt(char *relname,
}
}
if ((relid = RelnameFindRelid(relname)) == InvalidOid)
elog(ERROR, "renameatt: relation \"%s\" nonexistent", relname);
@ -201,7 +201,7 @@ renameatt(char *relname,
void
renamerel(char *oldrelname, char *newrelname)
{
int i;
int i;
Relation relrelation; /* for RELATION relation */
HeapTuple oldreltup;
char oldpath[MAXPGPATH],
@ -237,7 +237,7 @@ renamerel(char *oldrelname, char *newrelname)
{
sprintf(toldpath, "%s.%d", oldpath, i);
sprintf(tnewpath, "%s.%d", newpath, i);
if(rename(toldpath, tnewpath) < 0)
if (rename(toldpath, tnewpath) < 0)
break;
}
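
The renamerel() hunk only changes the spacing of "if (rename(...) < 0)", but the loop around it shows the multi-segment pattern: rename the base file, then each extra segment named "<relname>.1", "<relname>.2", ... until a rename fails. A standalone sketch of that pattern, assuming invented names (rename_all_segments, MAXPATH) and no error reporting beyond the return value:

#include <stdio.h>

#define MAXPATH 256

/*
 * Rename "oldbase" to "newbase", then every numbered segment file
 * oldbase.1, oldbase.2, ... until one is missing.  Returns the number
 * of files renamed, or -1 if even the base rename failed.
 */
static int
rename_all_segments(const char *oldbase, const char *newbase)
{
    char    oldpath[MAXPATH];
    char    newpath[MAXPATH];
    int     i;

    if (rename(oldbase, newbase) < 0)
        return -1;

    for (i = 1;; i++)
    {
        snprintf(oldpath, sizeof(oldpath), "%s.%d", oldbase, i);
        snprintf(newpath, sizeof(newpath), "%s.%d", newbase, i);
        if (rename(oldpath, newpath) < 0)
            break;              /* no more segments to rename */
    }
    return i;                   /* base file plus i - 1 segments */
}

int
main(void)
{
    if (rename_all_segments("old_table", "new_table") < 0)
        perror("rename");
    return 0;
}
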

View File

@ -66,7 +66,7 @@ static void init_params(CreateSeqStmt *seq, Form_pg_sequence new);
static int get_param(DefElem *def);
/*
* DefineSequence
* DefineSequence
* Creates a new sequence relation
*/
void
@ -218,8 +218,8 @@ nextval(struct varlena * seqin)
return elm->last;
}
seq = read_info("nextval", elm, &buf); /* lock page' buffer and read
* tuple */
seq = read_info("nextval", elm, &buf); /* lock page' buffer and
* read tuple */
next = result = seq->last_value;
incby = seq->increment_by;
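
The nextval() change here is only a comment re-wrap, but the logic it sits in — hand out a value straight from the session cache while one is left, otherwise lock the sequence page and read a fresh batch — is easy to show in miniature. A toy in-memory version with invented names (ToySequence, toy_nextval), no locking and no persistence:

#include <stdio.h>

/* Toy in-memory sequence: no locking, no disk, purely illustrative. */
typedef struct
{
    long    last;           /* last value handed out */
    long    cached;         /* top of the locally cached range */
    long    increment_by;
    long    cache_value;    /* how many values to grab per refill */
} ToySequence;

static long
toy_nextval(ToySequence *seq)
{
    if (seq->last < seq->cached)
    {
        /* cached values remain: no need to touch the "sequence page" */
        seq->last += seq->increment_by;
        return seq->last;
    }
    /* cache exhausted: pretend to lock the page and fetch a new batch */
    seq->last += seq->increment_by;
    seq->cached = seq->last + (seq->cache_value - 1) * seq->increment_by;
    return seq->last;
}

int
main(void)
{
    ToySequence s = {0, 0, 1, 10};
    int         i;

    for (i = 0; i < 25; i++)
        printf("%ld\n", toy_nextval(&s));
    return 0;
}
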
@ -327,8 +327,8 @@ setval(struct varlena * seqin, int4 next)
/* open and AccessShareLock sequence */
elm = init_sequence("setval", seqname);
seq = read_info("setval", elm, &buf); /* lock page' buffer and read
* tuple */
seq = read_info("setval", elm, &buf); /* lock page' buffer and
* read tuple */
if (seq->cache_value != 1)
{
@ -361,11 +361,11 @@ setval(struct varlena * seqin, int4 next)
static Form_pg_sequence
read_info(char *caller, SeqTable elm, Buffer *buf)
{
PageHeader page;
ItemId lp;
HeapTupleData tuple;
PageHeader page;
ItemId lp;
HeapTupleData tuple;
sequence_magic *sm;
Form_pg_sequence seq;
Form_pg_sequence seq;
if (RelationGetNumberOfBlocks(elm->rel) != 1)
elog(ERROR, "%s.%s: invalid number of blocks in sequence",
@ -464,7 +464,7 @@ init_sequence(char *caller, char *name)
/*
* CloseSequences
* CloseSequences
* is calling by xact mgr at commit/abort.
*/
void

View File

@ -362,9 +362,9 @@ RelationBuildTriggers(Relation relation)
Form_pg_trigger pg_trigger;
Relation irel;
ScanKeyData skey;
HeapTupleData tuple;
IndexScanDesc sd;
RetrieveIndexResult indexRes;
HeapTupleData tuple;
IndexScanDesc sd;
RetrieveIndexResult indexRes;
Buffer buffer;
struct varlena *val;
bool isnull;
@ -659,14 +659,14 @@ ExecARInsertTriggers(Relation rel, HeapTuple trigtuple)
bool
ExecBRDeleteTriggers(EState *estate, ItemPointer tupleid)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_DELETE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_DELETE];
HeapTuple trigtuple;
HeapTuple newtuple = NULL;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_DELETE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_DELETE];
HeapTuple trigtuple;
HeapTuple newtuple = NULL;
TupleTableSlot *newSlot;
int i;
int i;
trigtuple = GetTupleForTrigger(estate, tupleid, &newSlot);
if (trigtuple == NULL)
@ -697,7 +697,7 @@ ExecBRDeleteTriggers(EState *estate, ItemPointer tupleid)
void
ExecARDeleteTriggers(EState *estate, ItemPointer tupleid)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE];
Trigger **trigger = rel->trigdesc->tg_after_row[TRIGGER_EVENT_DELETE];
@ -727,23 +727,23 @@ ExecARDeleteTriggers(EState *estate, ItemPointer tupleid)
HeapTuple
ExecBRUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_UPDATE];
HeapTuple trigtuple;
HeapTuple oldtuple;
HeapTuple intuple = newtuple;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE];
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_UPDATE];
HeapTuple trigtuple;
HeapTuple oldtuple;
HeapTuple intuple = newtuple;
TupleTableSlot *newSlot;
int i;
int i;
trigtuple = GetTupleForTrigger(estate, tupleid, &newSlot);
if (trigtuple == NULL)
return NULL;
/*
* In READ COMMITTED isolevel it's possible that newtuple
* was changed due to concurrent update.
* In READ COMMITTED isolevel it's possible that newtuple was changed
* due to concurrent update.
*/
if (newSlot != NULL)
intuple = newtuple = ExecRemoveJunk(estate->es_junkFilter, newSlot);
@ -772,7 +772,7 @@ ExecBRUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
void
ExecARUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
{
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
Relation rel = estate->es_result_relation_info->ri_RelationDesc;
TriggerData *SaveTriggerData;
int ntrigs = rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE];
Trigger **trigger = rel->trigdesc->tg_after_row[TRIGGER_EVENT_UPDATE];
@ -799,22 +799,22 @@ ExecARUpdateTriggers(EState *estate, ItemPointer tupleid, HeapTuple newtuple)
return;
}
extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti, ItemPointer tid);
extern TupleTableSlot *EvalPlanQual(EState *estate, Index rti, ItemPointer tid);
static HeapTuple
GetTupleForTrigger(EState *estate, ItemPointer tid, TupleTableSlot **newSlot)
{
Relation relation = estate->es_result_relation_info->ri_RelationDesc;
HeapTupleData tuple;
HeapTuple result;
Buffer buffer;
Relation relation = estate->es_result_relation_info->ri_RelationDesc;
HeapTupleData tuple;
HeapTuple result;
Buffer buffer;
if (newSlot != NULL)
{
int test;
int test;
/*
* mark tuple for update
* mark tuple for update
*/
*newSlot = NULL;
tuple.t_self = *tid;
@ -824,7 +824,7 @@ ltrmark:;
{
case HeapTupleSelfUpdated:
ReleaseBuffer(buffer);
return(NULL);
return (NULL);
case HeapTupleMayBeUpdated:
break;
@ -835,9 +835,9 @@ ltrmark:;
elog(ERROR, "Can't serialize access due to concurrent update");
else if (!(ItemPointerEquals(&(tuple.t_self), tid)))
{
TupleTableSlot *epqslot = EvalPlanQual(estate,
estate->es_result_relation_info->ri_RangeTableIndex,
&(tuple.t_self));
TupleTableSlot *epqslot = EvalPlanQual(estate,
estate->es_result_relation_info->ri_RangeTableIndex,
&(tuple.t_self));
if (!(TupIsNull(epqslot)))
{
@ -846,23 +846,23 @@ ltrmark:;
goto ltrmark;
}
}
/*
* if tuple was deleted or PlanQual failed
* for updated tuple - we have not process
* this tuple!
/*
* if tuple was deleted or PlanQual failed for updated
* tuple - we have not process this tuple!
*/
return(NULL);
return (NULL);
default:
ReleaseBuffer(buffer);
elog(ERROR, "Unknown status %u from heap_mark4update", test);
return(NULL);
return (NULL);
}
}
else
{
PageHeader dp;
ItemId lp;
PageHeader dp;
ItemId lp;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
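
The GetTupleForTrigger() hunks above handle READ COMMITTED semantics: mark the row for update, and if a concurrent committed update moved it, follow the newer version (EvalPlanQual plus the ltrmark: retry) until a stable version is locked or the row turns out to be gone. The sketch below shows only the retry-loop shape with made-up names (RowStatus, try_lock_row); it is not the backend code, and the scripted lock outcomes merely simulate two concurrent updates.

#include <stdio.h>
#include <stdbool.h>

/* Outcomes of trying to lock a row, loosely modelled on heap_mark4update. */
typedef enum
{
    ROW_OK,             /* locked; nobody changed it underneath us */
    ROW_SELF_UPDATED,   /* already modified by the current command */
    ROW_MOVED           /* a concurrent committed update made a newer version */
} RowStatus;

/* Hypothetical lock attempt: replays a scripted pair of concurrent updates. */
static RowStatus
try_lock_row(int *row_version)
{
    static const RowStatus script[] = {ROW_MOVED, ROW_MOVED, ROW_OK};
    static int  step = 0;
    RowStatus   st = script[step < 2 ? step : 2];

    step++;
    if (st == ROW_MOVED)
        (*row_version)++;       /* follow the update chain to the newer version */
    return st;
}

/* Retry loop in the spirit of the ltrmark: label in GetTupleForTrigger(). */
static bool
lock_row_for_trigger(int *row_version)
{
    for (;;)
    {
        switch (try_lock_row(row_version))
        {
            case ROW_OK:
                return true;
            case ROW_SELF_UPDATED:
                return false;   /* our own earlier update; nothing to fire on */
            case ROW_MOVED:
                continue;       /* re-check the newer row version */
        }
    }
}

int
main(void)
{
    int     row_version = 1;

    if (lock_row_for_trigger(&row_version))
        printf("locked row version %d\n", row_version);
    return 0;
}
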

View File

@ -5,11 +5,11 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: user.c,v 1.27 1999/04/02 06:16:36 tgl Exp $
* $Id: user.c,v 1.28 1999/05/25 16:08:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <stdio.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
@ -35,7 +35,7 @@
static void CheckPgUserAclNotNull(void);
#define SQL_LENGTH 512
#define SQL_LENGTH 512
/*---------------------------------------------------------------------
* UpdatePgPwdFile
@ -49,9 +49,9 @@ void
UpdatePgPwdFile(char *sql, CommandDest dest)
{
char *filename,
*tempname;
int bufsize;
char *filename,
*tempname;
int bufsize;
/*
* Create a temporary filename to be renamed later. This prevents the
@ -68,9 +68,9 @@ UpdatePgPwdFile(char *sql, CommandDest dest)
* SEPCHAR character as the delimiter between fields. Then rename the
* file to its final name.
*/
snprintf(sql, SQL_LENGTH,
"copy %s to '%s' using delimiters %s",
ShadowRelationName, tempname, CRYPT_PWD_FILE_SEPCHAR);
snprintf(sql, SQL_LENGTH,
"copy %s to '%s' using delimiters %s",
ShadowRelationName, tempname, CRYPT_PWD_FILE_SEPCHAR);
pg_exec_query_dest(sql, dest, false);
rename(tempname, filename);
pfree((void *) tempname);
@ -94,19 +94,19 @@ UpdatePgPwdFile(char *sql, CommandDest dest)
void
DefineUser(CreateUserStmt *stmt, CommandDest dest)
{
char *pg_shadow,
sql[SQL_LENGTH];
Relation pg_shadow_rel;
TupleDesc pg_shadow_dsc;
HeapScanDesc scan;
HeapTuple tuple;
Datum datum;
bool exists = false,
n,
inblock,
havepassword,
havevaluntil;
int max_id = -1;
char *pg_shadow,
sql[SQL_LENGTH];
Relation pg_shadow_rel;
TupleDesc pg_shadow_dsc;
HeapScanDesc scan;
HeapTuple tuple;
Datum datum;
bool exists = false,
n,
inblock,
havepassword,
havevaluntil;
int max_id = -1;
havepassword = stmt->password && stmt->password[0];
havevaluntil = stmt->validUntil && stmt->validUntil[0];
@ -161,21 +161,21 @@ DefineUser(CreateUserStmt *stmt, CommandDest dest)
UnlockRelation(pg_shadow_rel, AccessExclusiveLock);
heap_close(pg_shadow_rel);
UserAbortTransactionBlock();
elog(ERROR,
"defineUser: user \"%s\" has already been created", stmt->user);
elog(ERROR,
"defineUser: user \"%s\" has already been created", stmt->user);
return;
}
/*
* Build the insert statement to be executed.
*
* XXX Ugly as this code is, it still fails to cope with ' or \
* in any of the provided strings.
* XXX Ugly as this code is, it still fails to cope with ' or \ in any of
* the provided strings.
*/
snprintf(sql, SQL_LENGTH,
snprintf(sql, SQL_LENGTH,
"insert into %s (usename,usesysid,usecreatedb,usetrace,"
"usesuper,usecatupd,passwd,valuntil) "
"values('%s',%d,'%c','t','%c','t',%s%s%s,%s%s%s)",
"values('%s',%d,'%c','t','%c','t',%s%s%s,%s%s%s)",
ShadowRelationName,
stmt->user,
max_id + 1,
@ -216,12 +216,12 @@ extern void
AlterUser(AlterUserStmt *stmt, CommandDest dest)
{
char *pg_shadow,
sql[SQL_LENGTH];
char *pg_shadow,
sql[SQL_LENGTH];
Relation pg_shadow_rel;
TupleDesc pg_shadow_dsc;
HeapTuple tuple;
bool inblock;
bool inblock;
if (stmt->password)
CheckPgUserAclNotNull();
@ -272,34 +272,32 @@ AlterUser(AlterUserStmt *stmt, CommandDest dest)
snprintf(sql, SQL_LENGTH, "update %s set", ShadowRelationName);
if (stmt->password)
{
snprintf(sql, SQL_LENGTH, "%s passwd = '%s'", pstrdup(sql), stmt->password);
}
if (stmt->createdb)
{
snprintf(sql, SQL_LENGTH, "%s %susecreatedb='%s'",
pstrdup(sql), stmt->password ? "," : "",
*stmt->createdb ? "t" : "f");
pstrdup(sql), stmt->password ? "," : "",
*stmt->createdb ? "t" : "f");
}
if (stmt->createuser)
{
snprintf(sql, SQL_LENGTH, "%s %susesuper='%s'",
pstrdup(sql), (stmt->password || stmt->createdb) ? "," : "",
*stmt->createuser ? "t" : "f");
pstrdup(sql), (stmt->password || stmt->createdb) ? "," : "",
*stmt->createuser ? "t" : "f");
}
if (stmt->validUntil)
{
snprintf(sql, SQL_LENGTH, "%s %svaluntil='%s'",
pstrdup(sql),
(stmt->password || stmt->createdb || stmt->createuser) ? "," : "",
stmt->validUntil);
pstrdup(sql),
(stmt->password || stmt->createdb || stmt->createuser) ? "," : "",
stmt->validUntil);
}
snprintf(sql, SQL_LENGTH, "%s where usename = '%s'",
pstrdup(sql), stmt->user);
pstrdup(sql), stmt->user);
pg_exec_query_dest(sql, dest, false);
@ -393,8 +391,8 @@ RemoveUser(char *user, CommandDest dest)
datum = heap_getattr(tuple, Anum_pg_database_datname, pg_dsc, &n);
if (memcmp((void *) datum, "template1", 9))
{
dbase =
(char **) repalloc((void *) dbase, sizeof(char *) * (ndbase + 1));
dbase =
(char **) repalloc((void *) dbase, sizeof(char *) * (ndbase + 1));
dbase[ndbase] = (char *) palloc(NAMEDATALEN + 1);
memcpy((void *) dbase[ndbase], (void *) datum, NAMEDATALEN);
dbase[ndbase++][NAMEDATALEN] = '\0';
@ -435,8 +433,8 @@ RemoveUser(char *user, CommandDest dest)
/*
* Remove the user from the pg_shadow table
*/
snprintf(sql, SQL_LENGTH,
"delete from %s where usename = '%s'", ShadowRelationName, user);
snprintf(sql, SQL_LENGTH,
"delete from %s where usename = '%s'", ShadowRelationName, user);
pg_exec_query_dest(sql, dest, false);
UpdatePgPwdFile(sql, dest);
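
As the XXX comment in the DefineUser() hunk admits, the SQL string is pasted together without coping with ' or \ in the supplied values. A minimal sketch of the single-quote half of that missing escaping follows; quote_literal and the buffer sizes are invented for the example, backslash handling is left out, and modern code would reach for a parameterized query or a library quoting routine instead.

#include <stdio.h>

/*
 * Copy "src" into "dst", doubling every single quote so the result can be
 * embedded between '...' in an SQL literal.  Silently truncates if dst is
 * too small; returns dst for convenience.
 */
static char *
quote_literal(char *dst, size_t dstlen, const char *src)
{
    size_t  j = 0;

    for (; *src != '\0' && j + 2 < dstlen; src++)
    {
        if (*src == '\'')
            dst[j++] = '\'';    /* ' becomes '' */
        dst[j++] = *src;
    }
    dst[j] = '\0';
    return dst;
}

int
main(void)
{
    char    quoted[128];
    char    sql[256];

    quote_literal(quoted, sizeof(quoted), "o'brien");
    snprintf(sql, sizeof(sql),
             "insert into pg_shadow (usename) values ('%s')", quoted);
    printf("%s\n", sql);        /* ... values ('o''brien') */
    return 0;
}
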

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.103 1999/05/23 09:10:24 vadim Exp $
* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.104 1999/05/25 16:08:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,7 +66,7 @@ static Portal vc_portal;
static int MESSAGE_LEVEL; /* message level */
static TransactionId XmaxRecent;
static TransactionId XmaxRecent;
#define swapLong(a,b) {long tmp; tmp=a; a=b; b=tmp;}
#define swapInt(a,b) {int tmp; tmp=a; a=b; b=tmp;}
@ -101,8 +101,8 @@ static void vc_free(VRelList vrl);
static void vc_getindices(Oid relid, int *nindices, Relation **Irel);
static void vc_clsindices(int nindices, Relation *Irel);
static void vc_mkindesc(Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc);
static void *vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *));
static void *vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *));
static int vc_cmp_blk(const void *left, const void *right);
static int vc_cmp_offno(const void *left, const void *right);
static int vc_cmp_vtlinks(const void *left, const void *right);
@ -222,14 +222,15 @@ vc_shutdown()
{
/* on entry, we are not in a transaction */
/* Flush the init file that relcache.c uses to save startup time.
* The next backend startup will rebuild the init file with up-to-date
* information from pg_class. This lets the optimizer see the stats that
* we've collected for certain critical system indexes. See relcache.c
* for more details.
/*
* Flush the init file that relcache.c uses to save startup time. The
* next backend startup will rebuild the init file with up-to-date
* information from pg_class. This lets the optimizer see the stats
* that we've collected for certain critical system indexes. See
* relcache.c for more details.
*
* Ignore any failure to unlink the file, since it might not be there
* if no backend has been started since the last vacuum...
* Ignore any failure to unlink the file, since it might not be there if
* no backend has been started since the last vacuum...
*/
unlink(RELCACHE_INIT_FILENAME);
@ -578,7 +579,7 @@ vc_vacone(Oid relid, bool analyze, List *va_cols)
/* update statistics in pg_class */
vc_updstats(vacrelstats->relid, vacrelstats->num_pages,
vacrelstats->num_tuples, vacrelstats->hasindex, vacrelstats);
vacrelstats->num_tuples, vacrelstats->hasindex, vacrelstats);
/* next command frees attribute stats */
CommitTransactionCommand();
@ -601,7 +602,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
blkno;
ItemId itemid;
Buffer buf;
HeapTupleData tuple;
HeapTupleData tuple;
Page page,
tempPage = NULL;
OffsetNumber offnum,
@ -712,7 +713,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
else if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (TransactionIdDidCommit((TransactionId)
tuple.t_data->t_cmin))
tuple.t_data->t_cmin))
{
tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
tupgone = true;
@ -759,7 +760,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
else
{
elog(NOTICE, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
relname, blkno, offnum, tuple.t_data->t_xmin);
relname, blkno, offnum, tuple.t_data->t_xmin);
do_shrinking = false;
}
}
@ -799,6 +800,7 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
}
else if (!TransactionIdIsInProgress(tuple.t_data->t_xmax))
{
/*
* Not Aborted, Not Committed, Not in Progress - so it
* from crashed process. - vadim 06/02/97
@ -812,11 +814,12 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
relname, blkno, offnum, tuple.t_data->t_xmax);
do_shrinking = false;
}
/*
* If tuple is recently deleted then
* we must not remove it from relation.
* If tuple is recently deleted then we must not remove it
* from relation.
*/
if (tupgone && tuple.t_data->t_xmax >= XmaxRecent &&
if (tupgone && tuple.t_data->t_xmax >= XmaxRecent &&
tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
{
tupgone = false;
@ -826,20 +829,21 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
tuple.t_data->t_infomask |= HEAP_XMAX_COMMITTED;
pgchanged = true;
}
/*
* If we do shrinking and this tuple is updated one
* then remember it to construct updated tuple
* dependencies.
*/
if (do_shrinking && !(ItemPointerEquals(&(tuple.t_self),
&(tuple.t_data->t_ctid))))
if (do_shrinking && !(ItemPointerEquals(&(tuple.t_self),
&(tuple.t_data->t_ctid))))
{
if (free_vtlinks == 0)
{
free_vtlinks = 1000;
vtlinks = (VTupleLink) repalloc(vtlinks,
(free_vtlinks + num_vtlinks) *
sizeof(VTupleLinkData));
vtlinks = (VTupleLink) repalloc(vtlinks,
(free_vtlinks + num_vtlinks) *
sizeof(VTupleLinkData));
}
vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
vtlinks[num_vtlinks].this_tid = tuple.t_self;
@ -962,8 +966,8 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
if (usable_free_size > 0 && num_vtlinks > 0)
{
qsort((char *) vtlinks, num_vtlinks, sizeof (VTupleLinkData),
vc_cmp_vtlinks);
qsort((char *) vtlinks, num_vtlinks, sizeof(VTupleLinkData),
vc_cmp_vtlinks);
vacrelstats->vtlinks = vtlinks;
vacrelstats->num_vtlinks = num_vtlinks;
}
@ -980,10 +984,10 @@ vc_scanheap(VRelStats *vacrelstats, Relation onerel,
Tup %u: Vac %u, Keep/VTL %u/%u, Crash %u, UnUsed %u, MinLen %u, MaxLen %u; \
Re-using: Free/Avail. Space %u/%u; EndEmpty/Avail. Pages %u/%u. \
Elapsed %u/%u sec.",
nblocks, changed_pages, vacuum_pages->vpl_num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
nunused, min_tlen, max_tlen, free_size, usable_free_size,
nblocks, changed_pages, vacuum_pages->vpl_num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
nunused, min_tlen, max_tlen, free_size, usable_free_size,
empty_end_pages, fraged_pages->vpl_num_pages,
ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
@ -1019,8 +1023,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
max_offset;
ItemId itemid,
newitemid;
HeapTupleData tuple,
newtup;
HeapTupleData tuple,
newtup;
TupleDesc tupdesc = NULL;
Datum *idatum = NULL;
char *inulls = NULL;
@ -1128,7 +1132,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
else
Assert(!isempty);
chain_tuple_moved = false; /* no one chain-tuple was moved off this page, yet */
chain_tuple_moved = false; /* no one chain-tuple was moved
* off this page, yet */
vpc->vpd_blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
@ -1146,28 +1151,30 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected");
/*
* If this (chain) tuple is moved by me already then
* I have to check is it in vpc or not - i.e. is it
* moved while cleaning this page or some previous one.
/*
* If this (chain) tuple is moved by me already then I
* have to check is it in vpc or not - i.e. is it moved
* while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (keep_tuples == 0)
continue;
if (chain_tuple_moved) /* some chains was moved while */
{ /* cleaning this page */
if (chain_tuple_moved) /* some chains was moved
* while */
{ /* cleaning this page */
Assert(vpc->vpd_offsets_free > 0);
for (i = 0; i < vpc->vpd_offsets_free; i++)
{
if (vpc->vpd_offsets[i] == offnum)
break;
}
if (i >= vpc->vpd_offsets_free) /* not found */
if (i >= vpc->vpd_offsets_free) /* not found */
{
vpc->vpd_offsets[vpc->vpd_offsets_free++] = offnum;
keep_tuples--;
@ -1184,29 +1191,29 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
}
/*
* If this tuple is in the chain of tuples created in
* updates by "recent" transactions then we have to
* move all chain of tuples to another places.
* If this tuple is in the chain of tuples created in updates
* by "recent" transactions then we have to move all chain of
* tuples to another places.
*/
if ((tuple.t_data->t_infomask & HEAP_UPDATED &&
if ((tuple.t_data->t_infomask & HEAP_UPDATED &&
tuple.t_data->t_xmin >= XmaxRecent) ||
(!(tuple.t_data->t_infomask & HEAP_XMAX_INVALID) &&
(!(tuple.t_data->t_infomask & HEAP_XMAX_INVALID) &&
!(ItemPointerEquals(&(tuple.t_self), &(tuple.t_data->t_ctid)))))
{
Buffer Cbuf = buf;
Page Cpage;
ItemId Citemid;
ItemPointerData Ctid;
HeapTupleData tp = tuple;
Size tlen = tuple_len;
VTupleMove vtmove = (VTupleMove)
palloc(100 * sizeof(VTupleMoveData));
int num_vtmove = 0;
int free_vtmove = 100;
VPageDescr to_vpd = fraged_pages->vpl_pagedesc[0];
int to_item = 0;
bool freeCbuf = false;
int ti;
Buffer Cbuf = buf;
Page Cpage;
ItemId Citemid;
ItemPointerData Ctid;
HeapTupleData tp = tuple;
Size tlen = tuple_len;
VTupleMove vtmove = (VTupleMove)
palloc(100 * sizeof(VTupleMoveData));
int num_vtmove = 0;
int free_vtmove = 100;
VPageDescr to_vpd = fraged_pages->vpl_pagedesc[0];
int to_item = 0;
bool freeCbuf = false;
int ti;
if (vacrelstats->vtlinks == NULL)
elog(ERROR, "No one parent tuple was found");
@ -1215,22 +1222,23 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
WriteBuffer(cur_buffer);
cur_buffer = InvalidBuffer;
}
/*
* If this tuple is in the begin/middle of the chain
* then we have to move to the end of chain.
* If this tuple is in the begin/middle of the chain then
* we have to move to the end of chain.
*/
while (!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) &&
!(ItemPointerEquals(&(tp.t_self), &(tp.t_data->t_ctid))))
while (!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) &&
!(ItemPointerEquals(&(tp.t_self), &(tp.t_data->t_ctid))))
{
Ctid = tp.t_data->t_ctid;
if (freeCbuf)
ReleaseBuffer(Cbuf);
freeCbuf = true;
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&Ctid));
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&Ctid));
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&Ctid));
Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&Ctid));
if (!ItemIdIsUsed(Citemid))
elog(ERROR, "Child itemid marked as unused");
tp.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
@ -1238,16 +1246,16 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
tlen = tp.t_len = ItemIdGetLength(Citemid);
}
/* first, can chain be moved ? */
for ( ; ; )
for (;;)
{
if (!vc_enough_space(to_vpd, tlen))
{
if (to_vpd != last_fraged_page &&
!vc_enough_space(to_vpd, vacrelstats->min_tlen))
!vc_enough_space(to_vpd, vacrelstats->min_tlen))
{
Assert(num_fraged_pages > to_item + 1);
memmove(fraged_pages->vpl_pagedesc + to_item,
fraged_pages->vpl_pagedesc + to_item + 1,
fraged_pages->vpl_pagedesc + to_item + 1,
sizeof(VPageDescr *) * (num_fraged_pages - to_item - 1));
num_fraged_pages--;
Assert(last_fraged_page == fraged_pages->vpl_pagedesc[num_fraged_pages - 1]);
@ -1257,7 +1265,8 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (vc_enough_space(fraged_pages->vpl_pagedesc[i], tlen))
break;
}
if (i == num_fraged_pages) /* can't move item anywhere */
if (i == num_fraged_pages) /* can't move item
* anywhere */
{
for (i = 0; i < num_vtmove; i++)
{
@ -1277,9 +1286,9 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
if (free_vtmove == 0)
{
free_vtmove = 1000;
vtmove = (VTupleMove) repalloc(vtmove,
(free_vtmove + num_vtmove) *
sizeof(VTupleMoveData));
vtmove = (VTupleMove) repalloc(vtmove,
(free_vtmove + num_vtmove) *
sizeof(VTupleMoveData));
}
vtmove[num_vtmove].tid = tp.t_self;
vtmove[num_vtmove].vpd = to_vpd;
@ -1289,56 +1298,59 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
vtmove[num_vtmove].cleanVpd = false;
free_vtmove--;
num_vtmove++;
/*
* All done ?
*/
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
tp.t_data->t_xmin < XmaxRecent)
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
tp.t_data->t_xmin < XmaxRecent)
break;
/*
* Well, try to find tuple with old row version
*/
for ( ; ; )
for (;;)
{
Buffer Pbuf;
Page Ppage;
ItemId Pitemid;
HeapTupleData Ptp;
VTupleLinkData vtld,
*vtlp;
Buffer Pbuf;
Page Ppage;
ItemId Pitemid;
HeapTupleData Ptp;
VTupleLinkData vtld,
*vtlp;
vtld.new_tid = tp.t_self;
vtlp = (VTupleLink)
vc_find_eq((void *) (vacrelstats->vtlinks),
vacrelstats->num_vtlinks,
sizeof(VTupleLinkData),
(void *) &vtld,
vc_cmp_vtlinks);
vtlp = (VTupleLink)
vc_find_eq((void *) (vacrelstats->vtlinks),
vacrelstats->num_vtlinks,
sizeof(VTupleLinkData),
(void *) &vtld,
vc_cmp_vtlinks);
if (vtlp == NULL)
elog(ERROR, "Parent tuple was not found");
tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel,
Pbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
ItemPointerGetOffsetNumber(&(tp.t_self)));
Pitemid = PageGetItemId(Ppage,
ItemPointerGetOffsetNumber(&(tp.t_self)));
if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "Parent itemid marked as unused");
Ptp.t_data = (HeapTupleHeader) PageGetItem(Ppage, Pitemid);
Assert(Ptp.t_data->t_xmax == tp.t_data->t_xmin);
/*
* If this tuple is updated version of row and
* it was created by the same transaction then
* no one is interested in this tuple -
* mark it as removed.
* If this tuple is updated version of row and it
* was created by the same transaction then no one
* is interested in this tuple - mark it as
* removed.
*/
if (Ptp.t_data->t_infomask & HEAP_UPDATED &&
if (Ptp.t_data->t_infomask & HEAP_UPDATED &&
Ptp.t_data->t_xmin == Ptp.t_data->t_xmax)
{
TransactionIdStore(myXID,
(TransactionId*) &(Ptp.t_data->t_cmin));
Ptp.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
TransactionIdStore(myXID,
(TransactionId *) &(Ptp.t_data->t_cmin));
Ptp.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
Ptp.t_data->t_infomask |= HEAP_MOVED_OFF;
WriteBuffer(Pbuf);
continue;
@ -1354,7 +1366,7 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
}
if (freeCbuf)
ReleaseBuffer(Cbuf);
if (num_vtmove == 0) /* chain can't be moved */
if (num_vtmove == 0) /* chain can't be moved */
{
pfree(vtmove);
break;
@ -1364,19 +1376,20 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
{
/* Get tuple from chain */
tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tuple.t_self)));
Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tuple.t_self)));
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&(tuple.t_self)));
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
/* Get page to move in */
cur_buffer = ReadBuffer(onerel, vtmove[ti].vpd->vpd_blkno);
/*
* We should LockBuffer(cur_buffer) but don't, at the
* moment. If you'll do LockBuffer then UNLOCK it
* before index_insert: unique btree-s call heap_fetch
* We should LockBuffer(cur_buffer) but don't, at the
* moment. If you'll do LockBuffer then UNLOCK it
* before index_insert: unique btree-s call heap_fetch
* to get t_infomask of inserted heap tuple !!!
*/
ToPage = BufferGetPage(cur_buffer);
@ -1385,22 +1398,23 @@ vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
vc_vacpage(ToPage, vtmove[ti].vpd);
heap_copytuple_with_tuple(&tuple, &newtup);
RelationInvalidateHeapTuple(onerel, &tuple);
TransactionIdStore(myXID, (TransactionId*) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_OFF);
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
newoff = PageAddItem(ToPage, (Item) newtup.t_data, tuple_len,
InvalidOffsetNumber, LP_USED);
InvalidOffsetNumber, LP_USED);
if (newoff == InvalidOffsetNumber)
{
elog(ERROR, "\
moving chain: failed to add item with len = %u to page %u",
tuple_len, vtmove[ti].vpd->vpd_blkno);
tuple_len, vtmove[ti].vpd->vpd_blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
newtup.t_data = (HeapTupleHeader) PageGetItem(ToPage, newitemid);
ItemPointerSet(&(newtup.t_self), vtmove[ti].vpd->vpd_blkno, newoff);
/*
* Set t_ctid pointing to itself for last tuple in
* chain and to next tuple in chain otherwise.
@ -1411,19 +1425,20 @@ moving chain: failed to add item with len = %u to page %u",
newtup.t_data->t_ctid = Ctid;
Ctid = newtup.t_self;
TransactionIdStore(myXID, (TransactionId*) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
num_moved++;
/*
* Remember that we moved tuple from the current page
* (corresponding index tuple will be cleaned).
*/
if (Cbuf == buf)
vpc->vpd_offsets[vpc->vpd_offsets_free++] =
ItemPointerGetOffsetNumber(&(tuple.t_self));
vpc->vpd_offsets[vpc->vpd_offsets_free++] =
ItemPointerGetOffsetNumber(&(tuple.t_self));
else
keep_tuples++;
@ -1432,12 +1447,12 @@ moving chain: failed to add item with len = %u to page %u",
for (i = 0, idcur = Idesc; i < nindices; i++, idcur++)
{
FormIndexDatum(idcur->natts,
(AttrNumber *) &(idcur->tform->indkey[0]),
&newtup,
tupdesc,
idatum,
inulls,
idcur->finfoP);
(AttrNumber *) &(idcur->tform->indkey[0]),
&newtup,
tupdesc,
idatum,
inulls,
idcur->finfoP);
iresult = index_insert(Irel[i],
idatum,
inulls,
@ -1507,13 +1522,13 @@ moving chain: failed to add item with len = %u to page %u",
RelationInvalidateHeapTuple(onerel, &tuple);
/*
* Mark new tuple as moved_in by vacuum and
* store vacuum XID in t_cmin !!!
/*
* Mark new tuple as moved_in by vacuum and store vacuum XID
* in t_cmin !!!
*/
TransactionIdStore(myXID, (TransactionId*) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_OFF);
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
/* add tuple to the page */
@ -1532,13 +1547,13 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
ItemPointerSet(&(newtup.t_data->t_ctid), cur_page->vpd_blkno, newoff);
newtup.t_self = newtup.t_data->t_ctid;
/*
* Mark old tuple as moved_off by vacuum and
* store vacuum XID in t_cmin !!!
/*
* Mark old tuple as moved_off by vacuum and store vacuum XID
* in t_cmin !!!
*/
TransactionIdStore(myXID, (TransactionId*) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED|HEAP_XMIN_INVALID|HEAP_MOVED_IN);
TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
tuple.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
cur_page->vpd_offsets_used++;
@ -1572,11 +1587,11 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (offnum < maxoff && keep_tuples > 0)
{
OffsetNumber off;
OffsetNumber off;
for (off = OffsetNumberNext(offnum);
off <= maxoff;
off = OffsetNumberNext(off))
off <= maxoff;
off = OffsetNumberNext(off))
{
itemid = PageGetItemId(page, off);
if (!ItemIdIsUsed(itemid))
@ -1584,21 +1599,22 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
if (tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)
continue;
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (4)");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
elog(ERROR, "HEAP_MOVED_IN was not expected (2)");
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
if (chain_tuple_moved) /* some chains was moved while */
{ /* cleaning this page */
if (chain_tuple_moved) /* some chains was moved
* while */
{ /* cleaning this page */
Assert(vpc->vpd_offsets_free > 0);
for (i = 0; i < vpc->vpd_offsets_free; i++)
{
if (vpc->vpd_offsets[i] == off)
break;
}
if (i >= vpc->vpd_offsets_free) /* not found */
if (i >= vpc->vpd_offsets_free) /* not found */
{
vpc->vpd_offsets[vpc->vpd_offsets_free++] = off;
Assert(keep_tuples > 0);
@ -1619,8 +1635,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
{
if (chain_tuple_moved) /* else - they are ordered */
{
qsort((char *) (vpc->vpd_offsets), vpc->vpd_offsets_free,
sizeof(OffsetNumber), vc_cmp_offno);
qsort((char *) (vpc->vpd_offsets), vpc->vpd_offsets_free,
sizeof(OffsetNumber), vc_cmp_offno);
}
vc_reappage(&Nvpl, vpc);
WriteBuffer(buf);
@ -1645,6 +1661,7 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (num_moved > 0)
{
/*
* We have to commit our tuple' movings before we'll truncate
* relation, but we shouldn't lose our locks. And so - quick hack:
@ -1657,8 +1674,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
}
/*
* Clean uncleaned reapped pages from vacuum_pages list list and set xmin
* committed for inserted tuples
* Clean uncleaned reapped pages from vacuum_pages list list and set
* xmin committed for inserted tuples
*/
checked_moved = 0;
for (i = 0, vpp = vacuum_pages->vpl_pagedesc; i < vacuumed_pages; i++, vpp++)
@ -1671,7 +1688,8 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
if (!PageIsEmpty(page))
vc_vacpage(page, *vpp);
}
else /* this page was used */
else
/* this page was used */
{
num_tuples = 0;
max_offset = PageGetMaxOffsetNumber(page);
@ -1685,7 +1703,7 @@ failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (2)");
if (tuple.t_data->t_infomask & HEAP_MOVED_IN)
{
@ -1734,8 +1752,8 @@ Elapsed %u/%u sec.",
}
Assert(keep_tuples >= 0);
for (i = 0; i < nindices; i++)
vc_vaconeind(&Nvpl, Irel[i],
vacrelstats->num_tuples, keep_tuples);
vc_vaconeind(&Nvpl, Irel[i],
vacrelstats->num_tuples, keep_tuples);
}
/*
@ -1757,7 +1775,7 @@ Elapsed %u/%u sec.",
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
if ((TransactionId)tuple.t_data->t_cmin != myXID)
if ((TransactionId) tuple.t_data->t_cmin != myXID)
elog(ERROR, "Invalid XID in t_cmin (3)");
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@ -1998,7 +2016,7 @@ vc_vaconeind(VPageList vpl, Relation indrel, int num_tuples, int keep_tuples)
getrusage(RUSAGE_SELF, &ru1);
elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %u: Deleted %u. Elapsed %u/%u sec.",
indrel->rd_rel->relname.data, num_pages,
indrel->rd_rel->relname.data, num_pages,
num_index_tuples - keep_tuples, tups_vacuumed,
ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
@ -2208,25 +2226,25 @@ vc_bucketcpy(Form_pg_attribute attr, Datum value, Datum *bucket, int16 *bucket_l
static void
vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *vacrelstats)
{
Relation rd,
ad,
sd;
HeapScanDesc scan;
HeapTupleData rtup;
HeapTuple ctup,
atup,
stup;
Form_pg_class pgcform;
ScanKeyData askey;
Form_pg_attribute attp;
Buffer buffer;
Relation rd,
ad,
sd;
HeapScanDesc scan;
HeapTupleData rtup;
HeapTuple ctup,
atup,
stup;
Form_pg_class pgcform;
ScanKeyData askey;
Form_pg_attribute attp;
Buffer buffer;
/*
* update number of tuples and number of pages in pg_class
*/
ctup = SearchSysCacheTupleCopy(RELOID,
ObjectIdGetDatum(relid),
0, 0, 0);
ObjectIdGetDatum(relid),
0, 0, 0);
if (!HeapTupleIsValid(ctup))
elog(ERROR, "pg_class entry for relid %u vanished during vacuuming",
relid);
@ -2237,7 +2255,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
rtup.t_self = ctup->t_self;
heap_fetch(rd, SnapshotNow, &rtup, &buffer);
pfree(ctup);
/* overwrite the existing statistics in the tuple */
vc_setpagelock(rd, ItemPointerGetBlockNumber(&(rtup.t_self)));
pgcform = (Form_pg_class) GETSTRUCT(&rtup);
@ -2317,8 +2335,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
attp->attdisbursion = selratio;
/*
* Invalidate the cache for the tuple
* and write the buffer
* Invalidate the cache for the tuple and write the buffer
*/
RelationInvalidateHeapTuple(ad, atup);
WriteNoReleaseBuffer(abuffer);
@ -2375,8 +2392,7 @@ vc_updstats(Oid relid, int num_pages, int num_tuples, bool hasindex, VRelStats *
}
/*
* Invalidate the cached pg_class tuple and
* write the buffer
* Invalidate the cached pg_class tuple and write the buffer
*/
RelationInvalidateHeapTuple(rd, &rtup);
@ -2504,8 +2520,8 @@ vc_free(VRelList vrl)
}
static void *
vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *))
vc_find_eq(void *bot, int nelem, int size, void *elm,
int (*compar) (const void *, const void *))
{
int res;
int last = nelem - 1;
@ -2527,16 +2543,16 @@ vc_find_eq(void *bot, int nelem, int size, void *elm,
}
if (last_move == true)
{
res = compar(elm, (void *)((char *)bot + last * size));
res = compar(elm, (void *) ((char *) bot + last * size));
if (res > 0)
return NULL;
if (res == 0)
return (void *)((char *)bot + last * size);
return (void *) ((char *) bot + last * size);
last_move = false;
}
res = compar(elm, (void *)((char *)bot + celm * size));
res = compar(elm, (void *) ((char *) bot + celm * size));
if (res == 0)
return (void *)((char *)bot + celm * size);
return (void *) ((char *) bot + celm * size);
if (res < 0)
{
if (celm == 0)
@ -2551,7 +2567,7 @@ vc_find_eq(void *bot, int nelem, int size, void *elm,
return NULL;
last = last - celm - 1;
bot = (void *)((char *)bot + (celm + 1) * size);
bot = (void *) ((char *) bot + (celm + 1) * size);
celm = (last + 1) / 2;
first_move = true;
}
@ -2591,25 +2607,25 @@ static int
vc_cmp_vtlinks(const void *left, const void *right)
{
if (((VTupleLink)left)->new_tid.ip_blkid.bi_hi <
((VTupleLink)right)->new_tid.ip_blkid.bi_hi)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi <
((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
return -1;
if (((VTupleLink)left)->new_tid.ip_blkid.bi_hi >
((VTupleLink)right)->new_tid.ip_blkid.bi_hi)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_hi >
((VTupleLink) right)->new_tid.ip_blkid.bi_hi)
return 1;
/* bi_hi-es are equal */
if (((VTupleLink)left)->new_tid.ip_blkid.bi_lo <
((VTupleLink)right)->new_tid.ip_blkid.bi_lo)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo <
((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
return -1;
if (((VTupleLink)left)->new_tid.ip_blkid.bi_lo >
((VTupleLink)right)->new_tid.ip_blkid.bi_lo)
if (((VTupleLink) left)->new_tid.ip_blkid.bi_lo >
((VTupleLink) right)->new_tid.ip_blkid.bi_lo)
return 1;
/* bi_lo-es are equal */
if (((VTupleLink)left)->new_tid.ip_posid <
((VTupleLink)right)->new_tid.ip_posid)
if (((VTupleLink) left)->new_tid.ip_posid <
((VTupleLink) right)->new_tid.ip_posid)
return -1;
if (((VTupleLink)left)->new_tid.ip_posid >
((VTupleLink)right)->new_tid.ip_posid)
if (((VTupleLink) left)->new_tid.ip_posid >
((VTupleLink) right)->new_tid.ip_posid)
return 1;
return 0;
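
vc_cmp_vtlinks() above is a three-key comparator (block-id hi word, then lo word, then position) used to qsort the vtlinks array so vc_find_eq() can binary-search it later. The standalone sketch below shows the same compare-keys-in-order shape with a simplified two-field ToyTid and the standard qsort()/bsearch() in place of the custom search; all names are invented for the illustration.

#include <stdio.h>
#include <stdlib.h>

/* Simplified item pointer: just a block number and a position. */
typedef struct
{
    unsigned int blkno;
    unsigned int posid;
} ToyTid;

/* Compare block number first, then position -- same shape as vc_cmp_vtlinks. */
static int
cmp_tid(const void *left, const void *right)
{
    const ToyTid *l = (const ToyTid *) left;
    const ToyTid *r = (const ToyTid *) right;

    if (l->blkno < r->blkno)
        return -1;
    if (l->blkno > r->blkno)
        return 1;
    if (l->posid < r->posid)
        return -1;
    if (l->posid > r->posid)
        return 1;
    return 0;
}

int
main(void)
{
    ToyTid  tids[] = {{7, 2}, {3, 9}, {7, 1}, {3, 1}};
    ToyTid  key = {7, 1};
    ToyTid *hit;
    int     i;

    qsort(tids, 4, sizeof(ToyTid), cmp_tid);
    hit = bsearch(&key, tids, 4, sizeof(ToyTid), cmp_tid);
    for (i = 0; i < 4; i++)
        printf("(%u,%u)\n", tids[i].blkno, tids[i].posid);
    if (hit != NULL)
        printf("found (%u,%u)\n", hit->blkno, hit->posid);
    return 0;
}
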

View File

@ -2,7 +2,7 @@
* Routines for handling of 'SET var TO',
* 'SHOW var' and 'RESET var' statements.
*
* $Id: variable.c,v 1.19 1999/02/18 06:00:44 momjian Exp $
* $Id: variable.c,v 1.20 1999/05/25 16:08:28 momjian Exp $
*
*/
@ -45,10 +45,12 @@ static bool parse_ksqo(const char *);
static bool show_XactIsoLevel(void);
static bool reset_XactIsoLevel(void);
static bool parse_XactIsoLevel(const char *);
#ifdef QUERY_LIMIT
static bool show_query_limit(void);
static bool reset_query_limit(void);
static bool parse_query_limit(const char *);
#endif
extern Cost _cpu_page_wight_;
@ -545,41 +547,41 @@ reset_timezone()
static bool
parse_query_limit(const char *value)
{
int32 limit;
int32 limit;
if (value == NULL) {
reset_query_limit();
return(TRUE);
}
/* why is pg_atoi's arg not declared "const char *" ? */
limit = pg_atoi((char *) value, sizeof(int32), '\0');
if (limit <= -1) {
elog(ERROR, "Bad value for # of query limit (%s)", value);
}
ExecutorLimit(limit);
return(TRUE);
if (value == NULL)
{
reset_query_limit();
return (TRUE);
}
/* why is pg_atoi's arg not declared "const char *" ? */
limit = pg_atoi((char *) value, sizeof(int32), '\0');
if (limit <= -1)
elog(ERROR, "Bad value for # of query limit (%s)", value);
ExecutorLimit(limit);
return (TRUE);
}
static bool
show_query_limit(void)
{
int limit;
int limit;
limit = ExecutorGetLimit();
if (limit == ALL_TUPLES) {
elog(NOTICE, "No query limit is set");
} else {
elog(NOTICE, "query limit is %d",limit);
}
return(TRUE);
limit = ExecutorGetLimit();
if (limit == ALL_TUPLES)
elog(NOTICE, "No query limit is set");
else
elog(NOTICE, "query limit is %d", limit);
return (TRUE);
}
static bool
reset_query_limit(void)
{
ExecutorLimit(ALL_TUPLES);
return(TRUE);
ExecutorLimit(ALL_TUPLES);
return (TRUE);
}
#endif
/*-----------------------------------------------------------------------*/
@ -685,10 +687,10 @@ ResetPGVariable(const char *name)
/*-----------------------------------------------------------------------
KSQO code will one day be unnecessary when the optimizer makes use of
KSQO code will one day be unnecessary when the optimizer makes use of
indexes when multiple ORs are specified in the where clause.
See optimizer/prep/prepkeyset.c for more on this.
daveh@insightdist.com 6/16/98
daveh@insightdist.com 6/16/98
-----------------------------------------------------------------------*/
static bool
parse_ksqo(const char *value)
@ -732,7 +734,7 @@ reset_ksqo()
static bool
parse_XactIsoLevel(const char *value)
{
if (value == NULL)
{
reset_XactIsoLevel();
@ -770,7 +772,7 @@ show_XactIsoLevel()
static bool
reset_XactIsoLevel()
{
if (SerializableSnapshot != NULL)
{
elog(ERROR, "SET TRANSACTION ISOLATION LEVEL must be called before any query");

View File

@ -5,11 +5,11 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: view.c,v 1.32 1999/02/13 23:15:12 momjian Exp $
* $Id: view.c,v 1.33 1999/05/25 16:08:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <stdio.h>
#include <stdio.h>
#include <string.h>
#include <postgres.h>
@ -230,9 +230,9 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
* table... CURRENT first, then NEW....
*/
rt_entry1 = addRangeTableEntry(NULL, (char *) viewName, "*CURRENT*",
FALSE, FALSE);
FALSE, FALSE);
rt_entry2 = addRangeTableEntry(NULL, (char *) viewName, "*NEW*",
FALSE, FALSE);
FALSE, FALSE);
new_rt = lcons(rt_entry2, old_rt);
new_rt = lcons(rt_entry1, new_rt);

View File

@ -6,15 +6,15 @@
* Copyright (c) 1994, Regents of the University of California
*
* DESCRIPTION
* This code provides support for a tee node, which allows
* multiple parent in a megaplan.
* This code provides support for a tee node, which allows
* multiple parent in a megaplan.
*
* INTERFACE ROUTINES
* ExecTee
* ExecInitTee
* ExecEndTee
*
* $Id: nodeTee.c,v 1.1 1999/03/23 16:50:49 momjian Exp $
* $Id: nodeTee.c,v 1.2 1999/05/25 16:08:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -46,12 +46,12 @@
bool
ExecInitTee(Tee *node, EState *currentEstate, Plan *parent)
{
TeeState *teeState;
Plan *outerPlan;
int len;
TeeState *teeState;
Plan *outerPlan;
int len;
Relation bufferRel;
TupleDesc tupType;
EState *estate;
EState *estate;
/*
* it is possible that the Tee has already been initialized since it
@ -167,7 +167,7 @@ ExecInitTee(Tee *node, EState *currentEstate, Plan *parent)
else
bufferRel = heap_open(
heap_create_with_catalog(teeState->tee_bufferRelname,
tupType, RELKIND_RELATION, false));
tupType, RELKIND_RELATION, false));
}
else
{
@ -176,7 +176,7 @@ ExecInitTee(Tee *node, EState *currentEstate, Plan *parent)
newoid());
bufferRel = heap_open(
heap_create_with_catalog(teeState->tee_bufferRelname,
tupType, RELKIND_RELATION, false));
tupType, RELKIND_RELATION, false));
}
teeState->tee_bufferRel = bufferRel;
@ -339,6 +339,7 @@ ExecTee(Tee *node, Plan *parent)
slot = ExecProcNode(childNode, (Plan *) node);
if (!TupIsNull(slot))
{
/*
* heap_insert changes something...
*/

View File

@ -5,7 +5,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: execAmi.c,v 1.34 1999/05/10 00:45:05 momjian Exp $
* $Id: execAmi.c,v 1.35 1999/05/25 16:08:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -24,7 +24,7 @@
* ExecCreatR function to create temporary relations
*
*/
#include <stdio.h>
#include <stdio.h>
#include "postgres.h"

File diff suppressed because it is too large.

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.50 1999/03/20 02:07:31 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.51 1999/05/25 16:08:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -66,7 +66,7 @@ bool execConstByVal;
int execConstLen;
/* static functions decls */
static Datum ExecEvalAggref(Aggref *aggref, ExprContext *econtext, bool *isNull);
static Datum ExecEvalAggref(Aggref * aggref, ExprContext *econtext, bool *isNull);
static Datum ExecEvalArrayRef(ArrayRef *arrayRef, ExprContext *econtext,
bool *isNull, bool *isDone);
static Datum ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull);
@ -190,7 +190,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
* ----------------------------------------------------------------
*/
static Datum
ExecEvalAggref(Aggref *aggref, ExprContext *econtext, bool *isNull)
ExecEvalAggref(Aggref * aggref, ExprContext *econtext, bool *isNull)
{
*isNull = econtext->ecxt_nulls[aggref->aggno];
return econtext->ecxt_values[aggref->aggno];
@ -232,7 +232,7 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
int16 len;
/*
* get the slot we want
* get the slot we want
*/
switch (variable->varno)
{
@ -251,7 +251,7 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
}
/*
* extract tuple information from the slot
* extract tuple information from the slot
*/
heapTuple = slot->val;
tuple_type = slot->ttc_tupleDescriptor;
@ -270,7 +270,7 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
* the entire tuple, we give back a whole slot so that callers know
* what the tuple looks like.
*/
if (attnum == InvalidAttrNumber)
if (attnum == InvalidAttrNumber)
{
TupleTableSlot *tempSlot;
TupleDesc td;
@ -299,26 +299,25 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
isNull); /* return: is attribute null? */
/*
* return null if att is null
* return null if att is null
*/
if (*isNull)
return (Datum) NULL;
/*
* get length and type information..
* ??? what should we do about variable length attributes
* - variable length attributes have their length stored
* in the first 4 bytes of the memory pointed to by the
* returned value.. If we can determine that the type
* is a variable length type, we can do the right thing.
* -cim 9/15/89
* get length and type information.. ??? what should we do about
* variable length attributes - variable length attributes have their
* length stored in the first 4 bytes of the memory pointed to by the
* returned value.. If we can determine that the type is a variable
* length type, we can do the right thing. -cim 9/15/89
*/
if (attnum < 0)
{
/*
* If this is a pseudo-att, we get the type and fake the length.
* There ought to be a routine to return the real lengths, so
* we'll mark this one ... XXX -mao
* If this is a pseudo-att, we get the type and fake the length.
* There ought to be a routine to return the real lengths, so
* we'll mark this one ... XXX -mao
*/
len = heap_sysattrlen(attnum); /* XXX see -mao above */
byval = heap_sysattrbyval(attnum); /* XXX see -mao above */
@ -609,11 +608,11 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
i = 0;
foreach(arg, argList)
{
/*
* evaluate the expression, in general functions cannot take
* sets as arguments but we make an exception in the case of
* nested dot expressions. We have to watch out for this case
* here.
* evaluate the expression, in general functions cannot take sets
* as arguments but we make an exception in the case of nested dot
* expressions. We have to watch out for this case here.
*/
argV[i] = (Datum)
ExecEvalExpr((Node *) lfirst(arg),
@ -671,10 +670,10 @@ ExecMakeFunctionResult(Node *node,
}
/*
* arguments is a list of expressions to evaluate
* before passing to the function manager.
* We collect the results of evaluating the expressions
* into a datum array (argV) and pass this array to arrayFmgr()
* arguments is a list of expressions to evaluate before passing to
* the function manager. We collect the results of evaluating the
* expressions into a datum array (argV) and pass this array to
* arrayFmgr()
*/
if (fcache->nargs != 0)
{
@ -743,8 +742,8 @@ ExecMakeFunctionResult(Node *node,
}
/*
* now return the value gotten by calling the function manager,
* passing the function the evaluated parameter values.
* now return the value gotten by calling the function manager,
* passing the function the evaluated parameter values.
*/
if (fcache->language == SQLlanguageId)
{
@ -843,12 +842,12 @@ ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull)
bool isDone;
/*
* an opclause is a list (op args). (I think)
* an opclause is a list (op args). (I think)
*
* we extract the oid of the function associated with
* the op and then pass the work onto ExecMakeFunctionResult
* which evaluates the arguments and returns the result of
* calling the function on the evaluated arguments.
* we extract the oid of the function associated with the op and then
* pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* evaluated arguments.
*/
op = (Oper *) opClause->oper;
argList = opClause->args;
@ -865,8 +864,8 @@ ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull)
}
/*
* call ExecMakeFunctionResult() with a dummy isDone that we ignore.
* We don't have operator whose arguments are sets.
* call ExecMakeFunctionResult() with a dummy isDone that we ignore.
* We don't have operator whose arguments are sets.
*/
return ExecMakeFunctionResult((Node *) op, argList, econtext, isNull, &isDone);
}
@ -887,14 +886,14 @@ ExecEvalFunc(Expr *funcClause,
FunctionCachePtr fcache;
/*
* an funcclause is a list (func args). (I think)
* an funcclause is a list (func args). (I think)
*
* we extract the oid of the function associated with
* the func node and then pass the work onto ExecMakeFunctionResult
* which evaluates the arguments and returns the result of
* calling the function on the evaluated arguments.
* we extract the oid of the function associated with the func node and
* then pass the work onto ExecMakeFunctionResult which evaluates the
* arguments and returns the result of calling the function on the
* evaluated arguments.
*
* this is nearly identical to the ExecEvalOper code.
* this is nearly identical to the ExecEvalOper code.
*/
func = (Func *) funcClause->oper;
argList = funcClause->args;
@ -939,21 +938,21 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
clause = lfirst(notclause->args);
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone flag,
* but ignore it.
*/
expr_value = ExecEvalExpr(clause, econtext, isNull, &isDone);
/*
* if the expression evaluates to null, then we just
* cascade the null back to whoever called us.
* if the expression evaluates to null, then we just cascade the null
* back to whoever called us.
*/
if (*isNull)
return expr_value;
/*
* evaluation of 'not' is simple.. expr is false, then
* return 'true' and vice versa.
* evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa.
*/
if (DatumGetInt32(expr_value) == 0)
return (Datum) true;
@ -978,22 +977,19 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
clauses = orExpr->args;
/*
* we use three valued logic functions here...
* we evaluate each of the clauses in turn,
* as soon as one is true we return that
* value. If none is true and none of the
* clauses evaluate to NULL we return
* the value of the last clause evaluated (which
* should be false) with *isNull set to false else
* if none is true and at least one clause evaluated
* to NULL we set *isNull flag to true -
* we use three valued logic functions here... we evaluate each of the
* clauses in turn, as soon as one is true we return that value. If
* none is true and none of the clauses evaluate to NULL we return
* the value of the last clause evaluated (which should be false) with
* *isNull set to false else if none is true and at least one clause
* evaluated to NULL we set *isNull flag to true -
*/
foreach(clause, clauses)
{
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
*/
const_value = ExecEvalExpr((Node *) lfirst(clause),
econtext,
@ -1001,34 +997,32 @@ ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if the expression evaluates to null, then we
* remember it in the local IsNull flag, if none of the
* clauses are true then we need to set *isNull
* to true again.
* if the expression evaluates to null, then we remember it in the
* local IsNull flag, if none of the clauses are true then we need
* to set *isNull to true again.
*/
if (*isNull)
{
IsNull = *isNull;
/*
* Many functions don't (or can't!) check if an argument is NULL
* or NOT_NULL and may return TRUE (1) with *isNull TRUE
* (an_int4_column <> 1: int4ne returns TRUE for NULLs).
* Not having time to fix the function manager I want to fix OR:
* if we had 'x <> 1 OR x isnull' then when x is NULL
* TRUE was returned by the 'x <> 1' clause ...
* but ExecQualClause says that the qualification should *fail*
* if isnull is TRUE for any value returned by ExecEvalExpr.
* So, force this rule here:
* if isnull is TRUE then the clause failed.
* Note: nullvalue() & nonnullvalue() always sets isnull to FALSE for NULLs.
* - vadim 09/22/97
* Many functions don't (or can't!) check if an argument is
* NULL or NOT_NULL and may return TRUE (1) with *isNull TRUE
* (an_int4_column <> 1: int4ne returns TRUE for NULLs). Not
* having time to fix the function manager I want to fix OR:
* if we had 'x <> 1 OR x isnull' then when x is NULL TRUE was
* returned by the 'x <> 1' clause ... but ExecQualClause says
* that the qualification should *fail* if isnull is TRUE for
* any value returned by ExecEvalExpr. So, force this rule
* here: if isnull is TRUE then the clause failed. Note:
* nullvalue() & nonnullvalue() always sets isnull to FALSE
* for NULLs. - vadim 09/22/97
*/
const_value = 0;
}
/*
* if we have a true result, then we return it.
* if we have a true result, then we return it.
*/
if (DatumGetInt32(const_value) != 0)
return const_value;
@ -1057,18 +1051,16 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
clauses = andExpr->args;
/*
* we evaluate each of the clauses in turn,
* as soon as one is false we return that
* value. If none are false or NULL then we return
* the value of the last clause evaluated, which
* should be true.
* we evaluate each of the clauses in turn, as soon as one is false we
* return that value. If none are false or NULL then we return the
* value of the last clause evaluated, which should be true.
*/
foreach(clause, clauses)
{
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
*/
const_value = ExecEvalExpr((Node *) lfirst(clause),
econtext,
@ -1076,17 +1068,16 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if the expression evaluates to null, then we
* remember it in IsNull, if none of the clauses after
* this evaluates to false we will have to set *isNull
* to true again.
* if the expression evaluates to null, then we remember it in
* IsNull, if none of the clauses after this evaluates to false we
* will have to set *isNull to true again.
*/
if (*isNull)
IsNull = *isNull;
/*
* if we have a false result, then we return it, since the
* conjunction must be false.
* if we have a false result, then we return it, since the
* conjunction must be false.
*/
if (DatumGetInt32(const_value) == 0)
return const_value;
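/*
 * Illustrative aside: the three-valued OR and AND rules described in the
 * comments above can be written out as a small standalone sketch.  The
 * TriValue enum and the two helpers below are hypothetical stand-ins for
 * the executor's Datum / isNull machinery, not code from this commit.
 */
#include <stdbool.h>

typedef enum { TV_FALSE, TV_TRUE, TV_NULL } TriValue;

/* OR: a true clause wins immediately; a NULL clause never makes the OR
 * true, but is remembered so that "no true, at least one NULL" => NULL. */
static TriValue
tri_or(const TriValue *clauses, int nclauses)
{
	bool		sawNull = false;
	int			i;

	for (i = 0; i < nclauses; i++)
	{
		if (clauses[i] == TV_TRUE)
			return TV_TRUE;		/* short-circuit on the first true clause */
		if (clauses[i] == TV_NULL)
			sawNull = true;		/* treat as failed here, remember for later */
	}
	return sawNull ? TV_NULL : TV_FALSE;
}

/* AND is the mirror image: false wins immediately; otherwise a remembered
 * NULL makes the whole conjunction NULL rather than true. */
static TriValue
tri_and(const TriValue *clauses, int nclauses)
{
	bool		sawNull = false;
	int			i;

	for (i = 0; i < nclauses; i++)
	{
		if (clauses[i] == TV_FALSE)
			return TV_FALSE;	/* short-circuit on the first false clause */
		if (clauses[i] == TV_NULL)
			sawNull = true;
	}
	return sawNull ? TV_NULL : TV_TRUE;
}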
@ -1106,7 +1097,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
ExecEvalCase(CaseExpr * caseExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@ -1117,17 +1108,16 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
clauses = caseExpr->args;
/*
* we evaluate each of the WHEN clauses in turn,
* as soon as one is true we return the corresponding
* result. If none are true then we return the value
* of the default clause, or NULL.
* we evaluate each of the WHEN clauses in turn, as soon as one is
* true we return the corresponding result. If none are true then we
* return the value of the default clause, or NULL.
*/
foreach(clause, clauses)
{
/*
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
* We don't iterate over sets in the quals, so pass in an isDone
* flag, but ignore it.
*/
wclause = lfirst(clause);
@ -1137,8 +1127,8 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
&isDone);
/*
* if we have a true test, then we return the result,
* since the case statement is satisfied.
* if we have a true test, then we return the result, since the
* case statement is satisfied.
*/
if (DatumGetInt32(const_value) != 0)
{
@ -1159,9 +1149,7 @@ ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext, bool *isNull)
&isDone);
}
else
{
*isNull = true;
}
return const_value;
}
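/*
 * Illustrative aside: the WHEN-clause loop above behaves like a chain of
 * if / else if tests with an optional ELSE.  The struct and function names
 * below are made up for the sketch; the real code works on CaseWhen nodes
 * and Datums.
 */
#include <stdbool.h>

typedef struct CaseWhenSketch
{
	bool		(*test) (void *ctx);	/* the WHEN condition */
	long		(*result) (void *ctx);	/* the corresponding THEN expression */
} CaseWhenSketch;

static long
eval_case_sketch(CaseWhenSketch *whens, int nwhens,
				 long (*defresult) (void *ctx), void *ctx, bool *isNull)
{
	int			i;

	*isNull = false;
	for (i = 0; i < nwhens; i++)
	{
		if (whens[i].test(ctx))
			return whens[i].result(ctx);	/* first true WHEN wins */
	}
	if (defresult != NULL)
		return defresult(ctx);				/* ELSE expression */
	*isNull = true;							/* no ELSE: the CASE is NULL */
	return 0;
}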
@ -1204,8 +1192,8 @@ ExecEvalExpr(Node *expression,
*isDone = true;
/*
* here we dispatch the work to the appropriate type
* of function given the type of our expression.
* here we dispatch the work to the appropriate type of function given
* the type of our expression.
*/
if (expression == NULL)
{
@ -1287,7 +1275,7 @@ ExecEvalExpr(Node *expression,
}
return retDatum;
} /* ExecEvalExpr() */
} /* ExecEvalExpr() */
/* ----------------------------------------------------------------
@ -1325,16 +1313,15 @@ ExecQualClause(Node *clause, ExprContext *econtext)
ExecEvalExpr(clause, econtext, &isNull, &isDone);
/*
* this is interesting behaviour here. When a clause evaluates
* to null, then we consider this as passing the qualification.
* it seems kind of like, if the qual is NULL, then there's no
* qual..
* this is interesting behaviour here. When a clause evaluates to
* null, then we consider this as passing the qualification. it seems
* kind of like, if the qual is NULL, then there's no qual..
*/
if (isNull)
return true;
/*
* remember, we return true when the qualification fails..
* remember, we return true when the qualification fails..
*/
if (DatumGetInt32(expr_value) == 0)
return true;
@ -1356,7 +1343,7 @@ ExecQual(List *qual, ExprContext *econtext)
bool result;
/*
* debugging stuff
* debugging stuff
*/
EV_printf("ExecQual: qual is ");
EV_nodeDisplay(qual);
@ -1365,18 +1352,18 @@ ExecQual(List *qual, ExprContext *econtext)
IncrProcessed();
/*
* return true immediately if no qual
* return true immediately if no qual
*/
if (qual == NIL)
return true;
/*
* a "qual" is a list of clauses. To evaluate the
* qual, we evaluate each of the clauses in the list.
* a "qual" is a list of clauses. To evaluate the qual, we evaluate
* each of the clauses in the list.
*
* ExecQualClause returns true when we know the qualification
* *failed* so we just pass each clause in qual to it until
* we know the qual failed or there are no more clauses.
* ExecQualClause returns true when we know the qualification *failed* so
* we just pass each clause in qual to it until we know the qual
* failed or there are no more clauses.
*/
result = false;
@ -1388,9 +1375,9 @@ ExecQual(List *qual, ExprContext *econtext)
}
/*
* if result is true, then it means a clause failed so we
* return false. if result is false then it means no clause
* failed so we return true.
* if result is true, then it means a clause failed so we return
* false. if result is false then it means no clause failed so we
* return true.
*/
if (result == true)
return false;
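/*
 * Illustrative aside: because ExecQualClause answers "did this clause
 * fail?", the logic above is inverted twice.  The net effect, restated
 * with the TriValue stand-in from the earlier sketch: every clause must
 * come out true or NULL for the qual to pass, and a single false clause
 * fails it.
 */
static bool
qual_passes_sketch(const TriValue *clauses, int nclauses)
{
	int			i;

	for (i = 0; i < nclauses; i++)
	{
		if (clauses[i] == TV_FALSE)
			return false;		/* one false clause fails the whole qual */
		/* TV_TRUE and TV_NULL both let this clause pass */
	}
	return true;
}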
@ -1447,48 +1434,46 @@ ExecTargetList(List *targetlist,
bool isNull;
/*
* debugging stuff
* debugging stuff
*/
EV_printf("ExecTargetList: tl is ");
EV_nodeDisplay(targetlist);
EV_printf("\n");
/*
* Return a dummy tuple if the targetlist is empty.
* the dummy tuple is necessary to differentiate
* between passing and failing the qualification.
* Return a dummy tuple if the targetlist is empty. the dummy tuple is
* necessary to differentiate between passing and failing the
* qualification.
*/
if (targetlist == NIL)
{
/*
* I now think that the only time this makes
* any sense is when we run a delete query. Then
* we need to return something other than nil
* so we know to delete the tuple associated
* with the saved tupleid.. see what ExecutePlan
* does with the returned tuple.. -cim 9/21/89
* I now think that the only time this makes any sense is when we
* run a delete query. Then we need to return something other
* than nil so we know to delete the tuple associated with the
* saved tupleid.. see what ExecutePlan does with the returned
* tuple.. -cim 9/21/89
*
* It could also happen in queries like:
* retrieve (foo.all) where bar.a = 3
* It could also happen in queries like: retrieve (foo.all) where
* bar.a = 3
*
* is this a new phenomenon? it might cause bogus behavior
* if we try to free this tuple later!! I put a hook in
* ExecProject to watch out for this case -mer 24 Aug 1992
* is this a new phenomenon? it might cause bogus behavior if we try
* to free this tuple later!! I put a hook in ExecProject to watch
* out for this case -mer 24 Aug 1992
*
* We must return dummy tuple!!! Try
* select t1.x from t1, t2 where t1.y = 1 and t2.y = 1
* - t2 scan target list will be empty and so no one tuple
* will be returned! But Mer was right - dummy tuple
* must be palloced... - vadim 03/01/1999
* We must return dummy tuple!!! Try select t1.x from t1, t2 where
* t1.y = 1 and t2.y = 1 - t2 scan target list will be empty and
* so no one tuple will be returned! But Mer was right - dummy
* tuple must be palloced... - vadim 03/01/1999
*/
*isDone = true;
return (HeapTuple) palloc(1);
}
/*
* allocate an array of char's to hold the "null" information
* only if we have a really large targetlist. otherwise we use
* the stack.
* allocate an array of char's to hold the "null" information only if
* we have a really large targetlist. otherwise we use the stack.
*/
if (nodomains > 64)
{
@ -1502,20 +1487,21 @@ ExecTargetList(List *targetlist,
}
/*
* evaluate all the expressions in the target list
* evaluate all the expressions in the target list
*/
EV_printf("ExecTargetList: setting target list values\n");
*isDone = true;
foreach(tl, targetlist)
{
/*
* remember, a target list is a list of lists:
* remember, a target list is a list of lists:
*
* ((<resdom | fjoin> expr) (<resdom | fjoin> expr) ...)
* ((<resdom | fjoin> expr) (<resdom | fjoin> expr) ...)
*
* tl is a pointer to successive cdr's of the targetlist
* tle is a pointer to the target list entry in tl
* tl is a pointer to successive cdr's of the targetlist tle is a
* pointer to the target list entry in tl
*/
tle = lfirst(tl);
@ -1572,7 +1558,7 @@ ExecTargetList(List *targetlist,
curNode < nNodes;
curNode++, fjTlist = lnext(fjTlist))
{
#ifdef NOT_USED /* what is this?? */
#ifdef NOT_USED /* what is this?? */
Node *outernode = lfirst(fjTlist);
fjRes = (Resdom *) outernode->iterexpr;
@ -1590,19 +1576,19 @@ ExecTargetList(List *targetlist,
}
/*
* form the new result tuple (in the "normal" context)
* form the new result tuple (in the "normal" context)
*/
newTuple = (HeapTuple) heap_formtuple(targettype, values, null_head);
/*
* free the nulls array if we allocated one..
* free the nulls array if we allocated one..
*/
if (nodomains > 64)
{
pfree(null_head);
pfree(fjIsNull);
}
return newTuple;
}
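/*
 * Illustrative aside: the "use the stack unless the target list is really
 * large" trick for the null flags, in isolation.  The 64-entry threshold
 * matches the comment above; everything else is a made-up name.  The 'n'
 * and ' ' markers are the null / not-null convention heap_formtuple expects.
 */
#include <stdlib.h>
#include <string.h>

#define NULLS_ON_STACK 64

static void
fill_nulls_sketch(int natts)
{
	char		stack_nulls[NULLS_ON_STACK];
	char	   *nulls;

	if (natts > NULLS_ON_STACK)
		nulls = (char *) malloc(natts);		/* big list: spill to the heap */
	else
		nulls = stack_nulls;				/* common case: stack array */

	memset(nulls, ' ', natts);				/* start with "not null" everywhere */

	/* ... evaluate the target list, setting nulls[i] = 'n' as needed ... */

	if (natts > NULLS_ON_STACK)
		free(nulls);						/* only free what we allocated */
}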
@ -1631,13 +1617,13 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
HeapTuple newTuple;
/*
* sanity checks
* sanity checks
*/
if (projInfo == NULL)
return (TupleTableSlot *) NULL;
/*
* get the projection info we want
* get the projection info we want
*/
slot = projInfo->pi_slot;
targetlist = projInfo->pi_targetlist;
@ -1648,7 +1634,7 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
econtext = projInfo->pi_exprContext;
/*
* form a new (result) tuple
* form a new (result) tuple
*/
newTuple = ExecTargetList(targetlist,
len,
@ -1658,11 +1644,10 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
isDone);
/*
* store the tuple in the projection slot and return the slot.
* store the tuple in the projection slot and return the slot.
*
* If there's no projection target list we don't want to pfree
* the bogus tuple that ExecTargetList passes back to us.
* -mer 24 Aug 1992
* If there's no projection target list we don't want to pfree the bogus
* tuple that ExecTargetList passes back to us. -mer 24 Aug 1992
*/
return (TupleTableSlot *)
ExecStoreTuple(newTuple,/* tuple to store */
@ -1670,4 +1655,3 @@ ExecProject(ProjectionInfo *projInfo, bool *isDone)
InvalidBuffer, /* tuple has no buffer */
true);
}


@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.24 1999/03/23 16:50:48 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.25 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -467,6 +467,7 @@ ExecSetSlotPolicy(TupleTableSlot *slot, /* slot to change */
return old_shouldFree;
}
#endif
/* --------------------------------
@ -650,6 +651,7 @@ ExecInitMarkedTupleSlot(EState *estate, MergeJoinState *mergestate)
INIT_SLOT_ALLOC;
mergestate->mj_MarkedTupleSlot = (TupleTableSlot *) slot;
}
#endif
/* ----------------


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.44 1999/03/20 01:13:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.45 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -402,7 +402,7 @@ ExecFreeExprContext(CommonState *commonstate)
void
ExecFreeTypeInfo(CommonState *commonstate)
{
TupleDesc tupDesc;
TupleDesc tupDesc;
tupDesc = commonstate->cs_ResultTupleSlot->ttc_tupleDescriptor;
if (tupDesc == NULL)
@ -498,12 +498,12 @@ ExecAssignScanTypeFromOuterPlan(Plan *node, CommonScanState *csstate)
* Routines dealing with the structure 'attribute' which conatains
* the type information about attributes in a tuple:
*
* ExecMakeTypeInfo(noType)
* ExecMakeTypeInfo(noType)
* returns pointer to array of 'noType' structure 'attribute'.
* ExecSetTypeInfo(index, typeInfo, attNum, attLen)
* ExecSetTypeInfo(index, typeInfo, attNum, attLen)
* sets the element indexed by 'index' in typeInfo with
* the values: attNum, attLen.
* ExecFreeTypeInfo(typeInfo)
* ExecFreeTypeInfo(typeInfo)
* frees the structure 'typeInfo'.
* ----------------------------------------------------------------
*/
@ -677,7 +677,7 @@ ExecGetIndexKeyInfo(Form_pg_index indexTuple,
*/
numKeys = 0;
for (i = 0; i < INDEX_MAX_KEYS &&
indexTuple->indkey[i] != InvalidAttrNumber; i++)
indexTuple->indkey[i] != InvalidAttrNumber; i++)
numKeys++;
/* ----------------
@ -711,7 +711,7 @@ ExecGetIndexKeyInfo(Form_pg_index indexTuple,
*/
CXT1_printf("ExecGetIndexKeyInfo: context is %d\n", CurrentMemoryContext);
attKeys = (AttrNumber *)palloc(numKeys * sizeof(AttrNumber));
attKeys = (AttrNumber *) palloc(numKeys * sizeof(AttrNumber));
for (i = 0; i < numKeys; i++)
attKeys[i] = indexTuple->indkey[i];
@ -917,19 +917,20 @@ ExecOpenIndices(Oid resultRelationOid,
if (indexDesc != NULL)
{
relationDescs[i++] = indexDesc;
/*
* Hack for not btree and hash indices: they use relation level
* exclusive locking on updation (i.e. - they are not ready
* for MVCC) and so we have to exclusively lock indices here
* to prevent deadlocks if we will scan them - index_beginscan
* places AccessShareLock, indices update methods don't use
* locks at all. We release this lock in ExecCloseIndices.
* Note, that hashes use page level locking - i.e. are not
* deadlock-free, - let's them be on their way -:))
* vadim 03-12-1998
* Hack for not btree and hash indices: they use relation
* level exclusive locking on updation (i.e. - they are
* not ready for MVCC) and so we have to exclusively lock
* indices here to prevent deadlocks if we will scan them
* - index_beginscan places AccessShareLock, indices
* update methods don't use locks at all. We release this
* lock in ExecCloseIndices. Note, that hashes use page
* level locking - i.e. are not deadlock-free, - let's
* them be on their way -:)) vadim 03-12-1998
*/
if (indexDesc->rd_rel->relam != BTREE_AM_OID &&
indexDesc->rd_rel->relam != HASH_AM_OID)
if (indexDesc->rd_rel->relam != BTREE_AM_OID &&
indexDesc->rd_rel->relam != HASH_AM_OID)
LockRelation(indexDesc, AccessExclusiveLock);
}
}
@ -1014,15 +1015,17 @@ ExecCloseIndices(RelationInfo *resultRelationInfo)
{
if (relationDescs[i] == NULL)
continue;
/*
* Notes in ExecOpenIndices.
*/
if (relationDescs[i]->rd_rel->relam != BTREE_AM_OID &&
relationDescs[i]->rd_rel->relam != HASH_AM_OID)
if (relationDescs[i]->rd_rel->relam != BTREE_AM_OID &&
relationDescs[i]->rd_rel->relam != HASH_AM_OID)
UnlockRelation(relationDescs[i], AccessExclusiveLock);
index_close(relationDescs[i]);
}
/*
* XXX should free indexInfo array here too.
*/
@ -1210,7 +1213,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
result = index_insert(relationDescs[i], /* index relation */
datum, /* array of heaptuple Datums */
nulls, /* info on nulls */
&(heapTuple->t_self), /* tid of heap tuple */
&(heapTuple->t_self), /* tid of heap tuple */
heapRelation);
/* ----------------


@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.25 1999/05/13 07:28:29 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.26 1999/05/25 16:08:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -107,9 +107,9 @@ init_execution_state(FunctionCachePtr fcache,
preves = (execution_state *) NULL;
planTree_list = pg_parse_and_plan(fcache->src, fcache->argOidVect,
nargs, &queryTree_list, None, FALSE);
nargs, &queryTree_list, None, FALSE);
foreach (qtl_item, queryTree_list)
foreach(qtl_item, queryTree_list)
{
Query *queryTree = lfirst(qtl_item);
Plan *planTree = lfirst(planTree_list);
@ -199,7 +199,7 @@ postquel_getnext(execution_state *es)
feature = (LAST_POSTQUEL_COMMAND(es)) ? EXEC_RETONE : EXEC_RUN;
return ExecutorRun(es->qd, es->estate, feature, (Node *)NULL, (Node *)NULL);
return ExecutorRun(es->qd, es->estate, feature, (Node *) NULL, (Node *) NULL);
}
static void


@ -45,7 +45,7 @@ typedef struct AggFuncInfo
FmgrInfo finalfn;
} AggFuncInfo;
static Datum aggGetAttr(TupleTableSlot *tuple, Aggref *aggref, bool *isNull);
static Datum aggGetAttr(TupleTableSlot *tuple, Aggref * aggref, bool *isNull);
/* ---------------------------------------
@ -121,7 +121,8 @@ ExecAgg(Agg *node)
*/
/*
* We loop retrieving groups until we find one matching node->plan.qual
* We loop retrieving groups until we find one matching
* node->plan.qual
*/
do
{
@ -133,7 +134,7 @@ ExecAgg(Agg *node)
econtext = aggstate->csstate.cstate.cs_ExprContext;
nagg = length(node->aggs);
value1 = node->aggstate->csstate.cstate.cs_ExprContext->ecxt_values;
nulls = node->aggstate->csstate.cstate.cs_ExprContext->ecxt_nulls;
@ -163,7 +164,7 @@ ExecAgg(Agg *node)
finalfn_oid;
aggref->aggno = ++aggno;
/* ---------------------
* find transfer functions of all the aggregates and initialize
* their initial values
@ -172,7 +173,7 @@ ExecAgg(Agg *node)
aggname = aggref->aggname;
aggTuple = SearchSysCacheTuple(AGGNAME,
PointerGetDatum(aggname),
ObjectIdGetDatum(aggref->basetype),
ObjectIdGetDatum(aggref->basetype),
0, 0);
if (!HeapTupleIsValid(aggTuple))
elog(ERROR, "ExecAgg: cache lookup failed for aggregate \"%s\"(%s)",
@ -195,9 +196,9 @@ ExecAgg(Agg *node)
fmgr_info(xfn2_oid, &aggFuncInfo[aggno].xfn2);
aggFuncInfo[aggno].xfn2_oid = xfn2_oid;
value2[aggno] = (Datum) AggNameGetInitVal((char *) aggname,
aggp->aggbasetype,
2,
&isNull2);
aggp->aggbasetype,
2,
&isNull2);
/* ------------------------------------------
* If there is a second transition function, its initial
* value must exist -- as it does not depend on data values,
@ -213,9 +214,9 @@ ExecAgg(Agg *node)
fmgr_info(xfn1_oid, &aggFuncInfo[aggno].xfn1);
aggFuncInfo[aggno].xfn1_oid = xfn1_oid;
value1[aggno] = (Datum) AggNameGetInitVal((char *) aggname,
aggp->aggbasetype,
1,
&isNull1);
aggp->aggbasetype,
1,
&isNull1);
/* ------------------------------------------
* If the initial value for the first transition function
@ -245,6 +246,7 @@ ExecAgg(Agg *node)
outerslot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(outerslot))
{
/*
* when the outerplan doesn't return a single tuple,
* create a dummy heaptuple anyway because we still need
@ -299,27 +301,29 @@ ExecAgg(Agg *node)
{
if (noInitValue[aggno])
{
/*
* value1 has not been initialized.
* This is the first non-NULL input value.
* We use it as the initial value for value1.
* value1 has not been initialized. This is the
* first non-NULL input value. We use it as the
* initial value for value1.
*
* But we can't just use it straight, we have to
* make a copy of it since the tuple from which it
* came will be freed on the next iteration of the
* But we can't just use it straight, we have to make
* a copy of it since the tuple from which it came
* will be freed on the next iteration of the
* scan. This requires finding out how to copy
* the Datum. We assume the datum is of the agg's
* basetype, or at least binary compatible with it.
* basetype, or at least binary compatible with
* it.
*/
Type aggBaseType = typeidType(aggref->basetype);
int attlen = typeLen(aggBaseType);
bool byVal = typeByVal(aggBaseType);
Type aggBaseType = typeidType(aggref->basetype);
int attlen = typeLen(aggBaseType);
bool byVal = typeByVal(aggBaseType);
if (byVal)
value1[aggno] = newVal;
else
{
if (attlen == -1) /* variable length */
if (attlen == -1) /* variable length */
attlen = VARSIZE((struct varlena *) newVal);
value1[aggno] = (Datum) palloc(attlen);
memcpy((char *) (value1[aggno]), (char *) newVal,
@ -330,13 +334,14 @@ ExecAgg(Agg *node)
}
else
{
/*
* apply the transition functions.
*/
args[0] = value1[aggno];
args[1] = newVal;
value1[aggno] = (Datum) fmgr_c(&aggfns->xfn1,
(FmgrValues *) args, &isNull1);
value1[aggno] = (Datum) fmgr_c(&aggfns->xfn1,
(FmgrValues *) args, &isNull1);
Assert(!isNull1);
}
}
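/*
 * Illustrative aside: initializing the transition value from the first
 * non-NULL input, stripped of the executor context.  DatumSketch and the
 * plain attlen / byVal parameters stand in for Datum and the typeLen /
 * typeByVal catalog lookups; this is a sketch, not the committed code.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

typedef unsigned long DatumSketch;

static DatumSketch
init_transition_value(DatumSketch newVal, int attlen, bool byVal)
{
	char	   *copy;

	if (byVal)
		return newVal;				/* pass-by-value: the Datum is the value */

	if (attlen == -1)				/* varlena: total size is its first int32 */
		attlen = *((int *) newVal);

	copy = (char *) malloc(attlen);	/* deep copy: the source tuple will be freed */
	memcpy(copy, (char *) newVal, attlen);
	return (DatumSketch) copy;
}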
@ -344,8 +349,8 @@ ExecAgg(Agg *node)
if (aggfns->xfn2.fn_addr != NULL)
{
args[0] = value2[aggno];
value2[aggno] = (Datum) fmgr_c(&aggfns->xfn2,
(FmgrValues *) args, &isNull2);
value2[aggno] = (Datum) fmgr_c(&aggfns->xfn2,
(FmgrValues *) args, &isNull2);
Assert(!isNull2);
}
}
@ -395,7 +400,7 @@ ExecAgg(Agg *node)
else
elog(NOTICE, "ExecAgg: no valid transition functions??");
value1[aggno] = (Datum) fmgr_c(&aggfns->finalfn,
(FmgrValues *) args, &(nulls[aggno]));
(FmgrValues *) args, &(nulls[aggno]));
}
else if (aggfns->xfn1.fn_addr != NULL)
{
@ -441,10 +446,11 @@ ExecAgg(Agg *node)
* As long as the retrieved group does not match the
* qualifications it is ignored and the next group is fetched
*/
if(node->plan.qual != NULL)
qual_result = ExecQual(fix_opids(node->plan.qual), econtext);
else qual_result = false;
if (node->plan.qual != NULL)
qual_result = ExecQual(fix_opids(node->plan.qual), econtext);
else
qual_result = false;
if (oneTuple)
pfree(oneTuple);
}
@ -466,7 +472,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
AggState *aggstate;
Plan *outerPlan;
ExprContext *econtext;
/*
* assign the node's execution state
*/
@ -478,7 +484,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
aggstate = makeNode(AggState);
node->aggstate = aggstate;
aggstate->agg_done = FALSE;
/*
* assign node's base id and create expression context
*/
@ -494,7 +500,7 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
ExecInitResultTupleSlot(estate, &aggstate->csstate.cstate);
econtext = aggstate->csstate.cstate.cs_ExprContext;
econtext->ecxt_values = (Datum *) palloc(sizeof(Datum) * length(node->aggs));
econtext->ecxt_values = (Datum *) palloc(sizeof(Datum) * length(node->aggs));
MemSet(econtext->ecxt_values, 0, sizeof(Datum) * length(node->aggs));
econtext->ecxt_nulls = (char *) palloc(sizeof(char) * length(node->aggs));
MemSet(econtext->ecxt_nulls, 0, sizeof(char) * length(node->aggs));
@ -538,8 +544,8 @@ int
ExecCountSlotsAgg(Agg *node)
{
return ExecCountSlotsNode(outerPlan(node)) +
ExecCountSlotsNode(innerPlan(node)) +
AGG_NSLOTS;
ExecCountSlotsNode(innerPlan(node)) +
AGG_NSLOTS;
}
/* ------------------------
@ -576,7 +582,7 @@ ExecEndAgg(Agg *node)
*/
static Datum
aggGetAttr(TupleTableSlot *slot,
Aggref *aggref,
Aggref * aggref,
bool *isNull)
{
Datum result;
@ -622,10 +628,11 @@ aggGetAttr(TupleTableSlot *slot,
return (Datum) tempSlot;
}
result = heap_getattr(heapTuple, /* tuple containing attribute */
attnum, /* attribute number of desired attribute */
tuple_type,/* tuple descriptor of tuple */
isNull); /* return: is attribute null? */
result = heap_getattr(heapTuple, /* tuple containing attribute */
attnum, /* attribute number of desired
* attribute */
tuple_type, /* tuple descriptor of tuple */
isNull); /* return: is attribute null? */
/* ----------------
* return null if att is null


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.18 1999/02/21 03:48:40 scrappy Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.19 1999/05/25 16:08:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -151,7 +151,7 @@ exec_append_initialize_next(Append *node)
if (appendstate->as_junkFilter_list)
{
estate->es_junkFilter = (JunkFilter *) nth(whichplan,
appendstate->as_junkFilter_list);
appendstate->as_junkFilter_list);
}
if (appendstate->as_result_relation_info_list)
{


@ -13,7 +13,7 @@
* columns. (ie. tuples from the same group are consecutive)
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.25 1999/02/13 23:15:21 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.26 1999/05/25 16:08:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -193,8 +193,8 @@ ExecGroupOneTuple(Group *node)
grpstate->grp_done = TRUE;
return NULL;
}
grpstate->grp_firstTuple = firsttuple =
heap_copytuple(outerslot->val);
grpstate->grp_firstTuple = firsttuple =
heap_copytuple(outerslot->val);
}
/*


@ -6,7 +6,7 @@
* Copyright (c) 1994, Regents of the University of California
*
*
* $Id: nodeHash.c,v 1.35 1999/05/18 21:33:06 tgl Exp $
* $Id: nodeHash.c,v 1.36 1999/05/25 16:08:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -19,7 +19,7 @@
*/
#include <sys/types.h>
#include <stdio.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
@ -80,7 +80,8 @@ ExecHash(Hash *node)
*/
for (i = 0; i < nbatch; i++)
{
File tfile = OpenTemporaryFile();
File tfile = OpenTemporaryFile();
Assert(tfile >= 0);
hashtable->innerBatchFile[i] = BufFileCreate(tfile);
}
@ -247,30 +248,33 @@ ExecHashTableCreate(Hash *node)
int i;
Portal myPortal;
char myPortalName[64];
MemoryContext oldcxt;
MemoryContext oldcxt;
/* ----------------
* Get information about the size of the relation to be hashed
* (it's the "outer" subtree of this node, but the inner relation of
* the hashjoin).
* Caution: this is only the planner's estimates, and so
* can't be trusted too far. Apply a healthy fudge factor.
* Caution: this is only the planner's estimates, and so
* can't be trusted too far. Apply a healthy fudge factor.
* ----------------
*/
outerNode = outerPlan(node);
ntuples = outerNode->plan_size;
if (ntuples <= 0) /* force a plausible size if no info */
ntuples = 1000;
/* estimate tupsize based on footprint of tuple in hashtable...
* but what about palloc overhead?
/*
* estimate tupsize based on footprint of tuple in hashtable... but
* what about palloc overhead?
*/
tupsize = MAXALIGN(outerNode->plan_width) +
MAXALIGN(sizeof(HashJoinTupleData));
inner_rel_bytes = (double) ntuples * tupsize * FUDGE_FAC;
inner_rel_bytes = (double) ntuples *tupsize * FUDGE_FAC;
/*
* Target hashtable size is SortMem kilobytes, but not less than
* sqrt(estimated inner rel size), so as to avoid horrible performance.
* sqrt(estimated inner rel size), so as to avoid horrible
* performance.
*/
hash_table_bytes = sqrt(inner_rel_bytes);
if (hash_table_bytes < (SortMem * 1024L))
@ -278,17 +282,19 @@ ExecHashTableCreate(Hash *node)
/*
* Count the number of hash buckets we want for the whole relation,
* for an average bucket load of NTUP_PER_BUCKET (per virtual bucket!).
* for an average bucket load of NTUP_PER_BUCKET (per virtual
* bucket!).
*/
totalbuckets = (int) ceil((double) ntuples * FUDGE_FAC / NTUP_PER_BUCKET);
/*
* Count the number of buckets we think will actually fit in the
* target memory size, at a loading of NTUP_PER_BUCKET (physical buckets).
* NOTE: FUDGE_FAC here determines the fraction of the hashtable space
* reserved to allow for nonuniform distribution of hash values.
* Perhaps this should be a different number from the other uses of
* FUDGE_FAC, but since we have no real good way to pick either one...
* target memory size, at a loading of NTUP_PER_BUCKET (physical
* buckets). NOTE: FUDGE_FAC here determines the fraction of the
* hashtable space reserved to allow for nonuniform distribution of
* hash values. Perhaps this should be a different number from the
* other uses of FUDGE_FAC, but since we have no real good way to pick
* either one...
*/
bucketsize = NTUP_PER_BUCKET * tupsize;
nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));
@ -297,21 +303,25 @@ ExecHashTableCreate(Hash *node)
if (totalbuckets <= nbuckets)
{
/* We have enough space, so no batching. In theory we could
* even reduce nbuckets, but since that could lead to poor
* behavior if estimated ntuples is much less than reality,
* it seems better to make more buckets instead of fewer.
/*
* We have enough space, so no batching. In theory we could even
* reduce nbuckets, but since that could lead to poor behavior if
* estimated ntuples is much less than reality, it seems better to
* make more buckets instead of fewer.
*/
totalbuckets = nbuckets;
nbatch = 0;
}
else
{
/* Need to batch; compute how many batches we want to use.
* Note that nbatch doesn't have to have anything to do with
* the ratio totalbuckets/nbuckets; in fact, it is the number
* of groups we will use for the part of the data that doesn't
* fall into the first nbuckets hash buckets.
/*
* Need to batch; compute how many batches we want to use. Note
* that nbatch doesn't have to have anything to do with the ratio
* totalbuckets/nbuckets; in fact, it is the number of groups we
* will use for the part of the data that doesn't fall into the
* first nbuckets hash buckets.
*/
nbatch = (int) ceil((inner_rel_bytes - hash_table_bytes) /
hash_table_bytes);
@ -319,16 +329,17 @@ ExecHashTableCreate(Hash *node)
nbatch = 1;
}
/* Now, totalbuckets is the number of (virtual) hashbuckets for the
/*
* Now, totalbuckets is the number of (virtual) hashbuckets for the
* whole relation, and nbuckets is the number of physical hashbuckets
* we will use in the first pass. Data falling into the first nbuckets
* virtual hashbuckets gets handled in the first pass; everything else
* gets divided into nbatch batches to be processed in additional
* passes.
* we will use in the first pass. Data falling into the first
* nbuckets virtual hashbuckets gets handled in the first pass;
* everything else gets divided into nbatch batches to be processed in
* additional passes.
*/
#ifdef HJDEBUG
printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
nbatch, totalbuckets, nbuckets);
printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
nbatch, totalbuckets, nbuckets);
#endif
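/*
 * Illustrative aside: a worked example of the sizing arithmetic above,
 * using assumed values throughout (ntuples and plan_width are invented,
 * and FUDGE_FAC = 2.0, NTUP_PER_BUCKET = 10, SortMem = 512 are taken as
 * example settings, not quoted from the source):
 *
 *	ntuples          = 100000                   (planner's row estimate)
 *	tupsize          = 48 + 16             = 64 (tuple + HashJoinTupleData overhead)
 *	inner_rel_bytes  = 100000 * 64 * 2.0   = 12,800,000
 *	hash_table_bytes = max(sqrt(12,800,000), 512 * 1024) = 524,288
 *	totalbuckets     = ceil(100000 * 2.0 / 10)            = 20,000
 *	bucketsize       = 10 * 64                            = 640
 *	nbuckets         = 524,288 / (640 * 2.0)              = 409
 *
 * Since totalbuckets (20,000) exceeds nbuckets (409), batching is needed:
 *
 *	nbatch = ceil((12,800,000 - 524,288) / 524,288) = 24
 */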
/* ----------------
@ -353,14 +364,16 @@ ExecHashTableCreate(Hash *node)
* ----------------
*/
i = 0;
do {
do
{
i++;
sprintf(myPortalName, "<hashtable %d>", i);
myPortal = GetPortalByName(myPortalName);
} while (PortalIsValid(myPortal));
myPortal = CreatePortal(myPortalName);
Assert(PortalIsValid(myPortal));
hashtable->myPortal = (void*) myPortal; /* kluge for circular includes */
hashtable->myPortal = (void *) myPortal; /* kluge for circular
* includes */
hashtable->hashCxt = (MemoryContext) PortalGetVariableMemory(myPortal);
hashtable->batchCxt = (MemoryContext) PortalGetHeapMemory(myPortal);
@ -392,8 +405,9 @@ ExecHashTableCreate(Hash *node)
/* The files will not be opened until later... */
}
/* Prepare portal for the first-scan space allocations;
* allocate the hashbucket array therein, and set each bucket "empty".
/*
* Prepare portal for the first-scan space allocations; allocate the
* hashbucket array therein, and set each bucket "empty".
*/
MemoryContextSwitchTo(hashtable->batchCxt);
StartPortalAllocMode(DefaultAllocMode, 0);
@ -405,9 +419,7 @@ ExecHashTableCreate(Hash *node)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
{
hashtable->buckets[i] = NULL;
}
MemoryContextSwitchTo(oldcxt);
@ -436,7 +448,7 @@ ExecHashTableDestroy(HashJoinTable hashtable)
/* Destroy the portal to release all working memory */
/* cast here is a kluge for circular includes... */
PortalDestroy((Portal*) & hashtable->myPortal);
PortalDestroy((Portal *) &hashtable->myPortal);
/* And drop the control block */
pfree(hashtable);
@ -468,15 +480,15 @@ ExecHashTableInsert(HashJoinTable hashtable,
* put the tuple in hash table
* ---------------
*/
HashJoinTuple hashTuple;
int hashTupleSize;
HashJoinTuple hashTuple;
int hashTupleSize;
hashTupleSize = MAXALIGN(sizeof(*hashTuple)) + heapTuple->t_len;
hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
hashTupleSize);
if (hashTuple == NULL)
elog(ERROR, "Insufficient memory for hash table.");
memcpy((char *) & hashTuple->htup,
memcpy((char *) &hashTuple->htup,
(char *) heapTuple,
sizeof(hashTuple->htup));
hashTuple->htup.t_data = (HeapTupleHeader)
@ -493,8 +505,9 @@ ExecHashTableInsert(HashJoinTable hashtable,
* put the tuple into a tmp file for other batches
* -----------------
*/
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
int batchno = (hashtable->nbatch * (bucketno - hashtable->nbuckets)) /
(hashtable->totalbuckets - hashtable->nbuckets);
hashtable->innerBatchSize[batchno]++;
ExecHashJoinSaveTuple(heapTuple,
hashtable->innerBatchFile[batchno]);
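/*
 * Illustrative aside: the batchno formula above spreads the virtual
 * buckets that did not fit in memory evenly over the nbatch temp files.
 * A standalone restatement (with the example numbers from the sizing
 * walk-through, nbuckets = 409, totalbuckets = 20000, nbatch = 24:
 * bucketno 409 goes to batch 0 and bucketno 19999 to batch 23):
 */
static int
batch_for_bucket(int bucketno, int nbuckets, int totalbuckets, int nbatch)
{
	if (bucketno < nbuckets)
		return -1;				/* stays in the in-memory hash table */
	return (nbatch * (bucketno - nbuckets)) / (totalbuckets - nbuckets);
}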
@ -563,26 +576,23 @@ ExecScanHashBucket(HashJoinState *hjstate,
List *hjclauses,
ExprContext *econtext)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
HashJoinTuple hashTuple = hjstate->hj_CurTuple;
HashJoinTable hashtable = hjstate->hj_HashTable;
HashJoinTuple hashTuple = hjstate->hj_CurTuple;
/* hj_CurTuple is NULL to start scanning a new bucket, or the address
/*
* hj_CurTuple is NULL to start scanning a new bucket, or the address
* of the last tuple returned from the current bucket.
*/
if (hashTuple == NULL)
{
hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
}
else
{
hashTuple = hashTuple->next;
}
while (hashTuple != NULL)
{
HeapTuple heapTuple = & hashTuple->htup;
HeapTuple heapTuple = &hashTuple->htup;
TupleTableSlot *inntuple;
bool qualResult;
bool qualResult;
/* insert hashtable's tuple into exec slot so ExecQual sees it */
inntuple = ExecStoreTuple(heapTuple, /* tuple to store */
@ -618,28 +628,34 @@ ExecScanHashBucket(HashJoinState *hjstate,
static int
hashFunc(Datum key, int len, bool byVal)
{
unsigned int h = 0;
unsigned char *k;
unsigned int h = 0;
unsigned char *k;
if (byVal)
{
if (byVal) {
/*
* If it's a by-value data type, use the 'len' least significant bytes
* of the Datum value. This should do the right thing on either
* bigendian or littleendian hardware --- see the Datum access
* macros in c.h.
* If it's a by-value data type, use the 'len' least significant
* bytes of the Datum value. This should do the right thing on
* either bigendian or littleendian hardware --- see the Datum
* access macros in c.h.
*/
while (len-- > 0) {
while (len-- > 0)
{
h = (h * PRIME1) ^ (key & 0xFF);
key >>= 8;
}
} else {
}
else
{
/*
* If this is a variable length type, then 'k' points to a "struct
* varlena" and len == -1. NOTE: VARSIZE returns the "real" data
* length plus the sizeof the "vl_len" attribute of varlena (the
* length information). 'k' points to the beginning of the varlena
* struct, so we have to use "VARDATA" to find the beginning of the
* "real" data.
* struct, so we have to use "VARDATA" to find the beginning of
* the "real" data.
*/
if (len == -1)
{
@ -647,9 +663,7 @@ hashFunc(Datum key, int len, bool byVal)
k = (unsigned char *) VARDATA(key);
}
else
{
k = (unsigned char *) key;
}
while (len-- > 0)
h = (h * PRIME1) ^ (*k++);
}
@ -669,7 +683,7 @@ hashFunc(Datum key, int len, bool byVal)
void
ExecHashTableReset(HashJoinTable hashtable, long ntuples)
{
MemoryContext oldcxt;
MemoryContext oldcxt;
int nbuckets = hashtable->nbuckets;
int i;
@ -682,13 +696,14 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
StartPortalAllocMode(DefaultAllocMode, 0);
/*
* We still use the same number of physical buckets as in the first pass.
* (It could be different; but we already decided how many buckets would
* be appropriate for the allowed memory, so stick with that number.)
* We MUST set totalbuckets to equal nbuckets, because from now on
* no tuples will go out to temp files; there are no more virtual buckets,
* only real buckets. (This implies that tuples will go into different
* bucket numbers than they did on the first pass, but that's OK.)
* We still use the same number of physical buckets as in the first
* pass. (It could be different; but we already decided how many
* buckets would be appropriate for the allowed memory, so stick with
* that number.) We MUST set totalbuckets to equal nbuckets, because
* from now on no tuples will go out to temp files; there are no more
* virtual buckets, only real buckets. (This implies that tuples will
* go into different bucket numbers than they did on the first pass,
* but that's OK.)
*/
hashtable->totalbuckets = nbuckets;
@ -700,9 +715,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
elog(ERROR, "Insufficient memory for hash table.");
for (i = 0; i < nbuckets; i++)
{
hashtable->buckets[i] = NULL;
}
MemoryContextSwitchTo(oldcxt);
}
@ -710,6 +723,7 @@ ExecHashTableReset(HashJoinTable hashtable, long ntuples)
void
ExecReScanHash(Hash *node, ExprContext *exprCtxt, Plan *parent)
{
/*
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.



@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.20 1999/05/18 21:33:06 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.21 1999/05/25 16:08:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -23,10 +23,10 @@
#include "optimizer/clauses.h" /* for get_leftop */
static TupleTableSlot *ExecHashJoinOuterGetTuple(Plan *node, Plan *parent,
HashJoinState *hjstate);
HashJoinState *hjstate);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
TupleTableSlot *tupleSlot);
BufFile * file,
TupleTableSlot *tupleSlot);
static int ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable);
static int ExecHashJoinNewBatch(HashJoinState *hjstate);
@ -132,7 +132,8 @@ ExecHashJoin(HashJoin *node)
*/
for (i = 0; i < hashtable->nbatch; i++)
{
File tfile = OpenTemporaryFile();
File tfile = OpenTemporaryFile();
Assert(tfile >= 0);
hashtable->outerBatchFile[i] = BufFileCreate(tfile);
}
@ -149,6 +150,7 @@ ExecHashJoin(HashJoin *node)
for (;;)
{
/*
* if the current outer tuple is nil, get a new one
*/
@ -159,6 +161,7 @@ ExecHashJoin(HashJoin *node)
hjstate);
if (TupIsNull(outerTupleSlot))
{
/*
* when the last batch runs out, clean up and exit
*/
@ -168,8 +171,8 @@ ExecHashJoin(HashJoin *node)
}
/*
* now we have an outer tuple, find the corresponding bucket for
* this tuple from the hash table
* now we have an outer tuple, find the corresponding bucket
* for this tuple from the hash table
*/
econtext->ecxt_outertuple = outerTupleSlot;
hjstate->hj_CurBucketNo = ExecHashGetBucket(hashtable, econtext,
@ -184,20 +187,23 @@ ExecHashJoin(HashJoin *node)
*/
if (hashtable->curbatch == 0)
{
int batch = ExecHashJoinGetBatch(hjstate->hj_CurBucketNo,
hashtable);
int batch = ExecHashJoinGetBatch(hjstate->hj_CurBucketNo,
hashtable);
if (batch > 0)
{
/*
* Need to postpone this outer tuple to a later batch.
* Save it in the corresponding outer-batch file.
*/
int batchno = batch - 1;
int batchno = batch - 1;
hashtable->outerBatchSize[batchno]++;
ExecHashJoinSaveTuple(outerTupleSlot->val,
hashtable->outerBatchFile[batchno]);
hashtable->outerBatchFile[batchno]);
ExecClearTuple(outerTupleSlot);
continue; /* loop around for a new outer tuple */
continue; /* loop around for a new outer tuple */
}
}
}
@ -212,6 +218,7 @@ ExecHashJoin(HashJoin *node)
econtext);
if (curtuple == NULL)
break; /* out of matches */
/*
* we've got a match, but still need to test qpqual
*/
@ -427,32 +434,33 @@ ExecEndHashJoin(HashJoin *node)
static TupleTableSlot *
ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
int curbatch = hashtable->curbatch;
HashJoinTable hashtable = hjstate->hj_HashTable;
int curbatch = hashtable->curbatch;
TupleTableSlot *slot;
if (curbatch == 0)
{ /* if it is the first pass */
slot = ExecProcNode(node, parent);
if (! TupIsNull(slot))
if (!TupIsNull(slot))
return slot;
/*
* We have just reached the end of the first pass.
* Try to switch to a saved batch.
* We have just reached the end of the first pass. Try to switch
* to a saved batch.
*/
curbatch = ExecHashJoinNewBatch(hjstate);
}
/*
* Try to read from a temp file.
* Loop allows us to advance to new batch as needed.
* Try to read from a temp file. Loop allows us to advance to new
* batch as needed.
*/
while (curbatch <= hashtable->nbatch)
{
slot = ExecHashJoinGetSavedTuple(hjstate,
hashtable->outerBatchFile[curbatch-1],
hashtable->outerBatchFile[curbatch - 1],
hjstate->hj_OuterTupleSlot);
if (! TupIsNull(slot))
if (!TupIsNull(slot))
return slot;
curbatch = ExecHashJoinNewBatch(hjstate);
}
@ -470,12 +478,12 @@ ExecHashJoinOuterGetTuple(Plan *node, Plan *parent, HashJoinState *hjstate)
static TupleTableSlot *
ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
BufFile * file,
TupleTableSlot *tupleSlot)
{
HeapTupleData htup;
size_t nread;
HeapTuple heapTuple;
HeapTupleData htup;
size_t nread;
HeapTuple heapTuple;
nread = BufFileRead(file, (void *) &htup, sizeof(HeapTupleData));
if (nread == 0)
@ -484,8 +492,8 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
elog(ERROR, "Read from hashjoin temp file failed");
heapTuple = palloc(HEAPTUPLESIZE + htup.t_len);
memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData));
heapTuple->t_data = (HeapTupleHeader)
((char *) heapTuple + HEAPTUPLESIZE);
heapTuple->t_data = (HeapTupleHeader)
((char *) heapTuple + HEAPTUPLESIZE);
nread = BufFileRead(file, (void *) heapTuple->t_data, htup.t_len);
if (nread != (size_t) htup.t_len)
elog(ERROR, "Read from hashjoin temp file failed");
@ -506,16 +514,17 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
int newbatch = hashtable->curbatch + 1;
long *innerBatchSize = hashtable->innerBatchSize;
long *outerBatchSize = hashtable->outerBatchSize;
BufFile *innerFile;
BufFile *innerFile;
TupleTableSlot *slot;
ExprContext *econtext;
Var *innerhashkey;
if (newbatch > 1)
{
/*
* We no longer need the previous outer batch file;
* close it right away to free disk space.
* We no longer need the previous outer batch file; close it right
* away to free disk space.
*/
BufFileClose(hashtable->outerBatchFile[newbatch - 2]);
hashtable->outerBatchFile[newbatch - 2] = NULL;
@ -541,8 +550,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
return newbatch; /* no more batches */
/*
* Rewind inner and outer batch files for this batch,
* so that we can start reading them.
* Rewind inner and outer batch files for this batch, so that we can
* start reading them.
*/
if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0L,
SEEK_SET) != 0L)
@ -571,7 +580,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
}
/*
* after we build the hash table, the inner batch file is no longer needed
* after we build the hash table, the inner batch file is no longer
* needed
*/
BufFileClose(innerFile);
hashtable->innerBatchFile[newbatch - 1] = NULL;
@ -615,9 +625,9 @@ ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable)
void
ExecHashJoinSaveTuple(HeapTuple heapTuple,
BufFile *file)
BufFile * file)
{
size_t written;
size_t written;
written = BufFileWrite(file, (void *) heapTuple, sizeof(HeapTupleData));
if (written != sizeof(HeapTupleData))


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.35 1999/05/10 00:45:06 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.36 1999/05/25 16:08:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -91,13 +91,14 @@ IndexNext(IndexScan *node)
IndexScanDesc scandesc;
Relation heapRelation;
RetrieveIndexResult result;
HeapTuple tuple;
HeapTuple tuple;
TupleTableSlot *slot;
Buffer buffer = InvalidBuffer;
int numIndices;
bool bBackward;
int indexNumber;
bool bBackward;
int indexNumber;
/* ----------------
* extract necessary information from index scan node
* ----------------
@ -114,14 +115,14 @@ IndexNext(IndexScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now.
* We could introduce new nodes for this case and handle
* IndexScan --> NewNode switching in Init/ReScan plan...
* Additional checking is not good, but no other way for now. We could
* introduce new nodes for this case and handle IndexScan --> NewNode
* switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
{
int iptr;
int iptr;
slot->ttc_buffer = InvalidBuffer;
slot->ttc_shouldFree = false;
@ -138,7 +139,7 @@ IndexNext(IndexScan *node)
scanstate->cstate.cs_ExprContext))
break;
}
if (iptr == numIndices) /* would not be returned by indices */
if (iptr == numIndices) /* would not be returned by indices */
slot->val = NULL;
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[node->scan.scanrelid - 1] = true;
@ -153,26 +154,26 @@ IndexNext(IndexScan *node)
* appropriate heap tuple.. else return NULL.
* ----------------
*/
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
{
indexNumber = numIndices - indexstate->iss_IndexPtr - 1;
if (indexNumber < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = numIndices - 1;
}
}
else
{
if ((indexNumber = indexstate->iss_IndexPtr) < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = 0;
}
}
while (indexNumber < numIndices)
{
bBackward = ScanDirectionIsBackward(direction);
if (bBackward)
{
indexNumber = numIndices - indexstate->iss_IndexPtr - 1;
if (indexNumber < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = numIndices - 1;
}
}
else
{
if ((indexNumber = indexstate->iss_IndexPtr) < 0)
{
indexNumber = 0;
indexstate->iss_IndexPtr = 0;
}
}
while (indexNumber < numIndices)
{
scandesc = scanDescs[indexstate->iss_IndexPtr];
while ((result = index_getnext(scandesc, direction)) != NULL)
{
@ -224,14 +225,14 @@ IndexNext(IndexScan *node)
if (BufferIsValid(buffer))
ReleaseBuffer(buffer);
}
if (indexNumber < numIndices)
{
indexNumber++;
if (bBackward)
indexstate->iss_IndexPtr--;
else
indexstate->iss_IndexPtr++;
}
if (indexNumber < numIndices)
{
indexNumber++;
if (bBackward)
indexstate->iss_IndexPtr--;
else
indexstate->iss_IndexPtr++;
}
}
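/*
 * Illustrative aside: the loop above walks an array of index scans, either
 * forward or backward, returning the first tuple any of them still yields.
 * Reduced to its shape (fetch() and the void pointers are placeholders for
 * index_getnext() and the scan descriptors; per-call state such as
 * iss_IndexPtr is ignored in this sketch):
 */
#include <stdbool.h>

static void *
next_from_indices_sketch(void **scans, int numIndices, bool backward,
						 void *(*fetch) (void *scan))
{
	int			i;

	for (i = 0; i < numIndices; i++)
	{
		void	   *scan = scans[backward ? (numIndices - 1 - i) : i];
		void	   *tuple = fetch(scan);

		if (tuple != NULL)
			return tuple;		/* first index that still yields a tuple wins */
	}
	return NULL;				/* all indices exhausted */
}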
/* ----------------
* if we get here it means the index scan failed so we
@ -323,7 +324,7 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
indexstate->iss_IndexPtr = -1;
/* If this is re-scanning of PlanQual ... */
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
{
estate->es_evTupleNull[node->scan.scanrelid - 1] = false;
@ -703,7 +704,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
run_keys = (n_keys <= 0) ? NULL :
(int *) palloc(n_keys * sizeof(int));
CXT1_printf("ExecInitIndexScan: context is %d\n",CurrentMemoryContext);
CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext);
/* ----------------
* for each opclause in the given qual,


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.21 1999/02/13 23:15:24 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.22 1999/05/25 16:08:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -116,7 +116,7 @@ ExecMaterial(Material *node)
if (TupIsNull(slot))
break;
/*
* heap_insert changes something...
*/
@ -124,12 +124,12 @@ ExecMaterial(Material *node)
heapTuple = heap_copytuple(slot->val);
else
heapTuple = slot->val;
heap_insert(tempRelation, heapTuple);
if (slot->ttc_buffer != InvalidBuffer)
pfree(heapTuple);
ExecClearTuple(slot);
}
currentRelation = tempRelation;
@ -360,8 +360,8 @@ ExecMaterialReScan(Material *node, ExprContext *exprCtxt, Plan *parent)
return;
matstate->csstate.css_currentScanDesc = ExecReScanR(matstate->csstate.css_currentRelation,
matstate->csstate.css_currentScanDesc,
node->plan.state->es_direction, 0, NULL);
matstate->csstate.css_currentScanDesc,
node->plan.state->es_direction, 0, NULL);
}


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.26 1999/05/10 00:45:07 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.27 1999/05/25 16:08:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -103,7 +103,7 @@ static bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
* ----------------------------------------------------------------
*/
static List *
MJFormSkipQual(List *qualList, char * replaceopname)
MJFormSkipQual(List *qualList, char *replaceopname)
{
List *qualCopy;
List *qualcdr;
@ -148,14 +148,14 @@ MJFormSkipQual(List *qualList, char * replaceopname)
* ----------------
*/
optup = get_operator_tuple(op->opno);
if (!HeapTupleIsValid(optup)) /* shouldn't happen */
if (!HeapTupleIsValid(optup)) /* shouldn't happen */
elog(ERROR, "MJFormSkipQual: operator %u not found", op->opno);
opform = (Form_pg_operator) GETSTRUCT(optup);
oprleft = opform->oprleft;
oprright = opform->oprright;
/* ----------------
* Now look up the matching "<" or ">" operator. If there isn't one,
* Now look up the matching "<" or ">" operator. If there isn't one,
* whoever marked the "=" operator mergejoinable was a loser.
* ----------------
*/
@ -166,7 +166,7 @@ MJFormSkipQual(List *qualList, char * replaceopname)
CharGetDatum('b'));
if (!HeapTupleIsValid(optup))
elog(ERROR,
"MJFormSkipQual: mergejoin operator %u has no matching %s op",
"MJFormSkipQual: mergejoin operator %u has no matching %s op",
op->opno, replaceopname);
opform = (Form_pg_operator) GETSTRUCT(optup);


@ -27,7 +27,7 @@
* SeqScan (emp.all)
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.10 1999/03/20 01:13:22 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.11 1999/05/25 16:08:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -263,8 +263,8 @@ ExecEndResult(Result *node)
* is freed at end-transaction time. -cim 6/2/91
* ----------------
*/
ExecFreeExprContext(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeTypeInfo(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeExprContext(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeTypeInfo(&resstate->cstate); /* XXX - new for us - er1p */
ExecFreeProjectionInfo(&resstate->cstate);
/* ----------------
@ -278,7 +278,8 @@ ExecEndResult(Result *node)
* ----------------
*/
ExecClearTuple(resstate->cstate.cs_ResultTupleSlot);
pfree(resstate); node->resstate = NULL; /* XXX - new for us - er1p */
pfree(resstate);
node->resstate = NULL; /* XXX - new for us - er1p */
}
void


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.17 1999/02/13 23:15:26 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.18 1999/05/25 16:08:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -68,11 +68,11 @@ SeqNext(SeqScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now.
* We could introduce new nodes for this case and handle
* SeqScan --> NewNode switching in Init/ReScan plan...
* Additional checking is not good, but no other way for now. We could
* introduce new nodes for this case and handle SeqScan --> NewNode
* switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scanrelid - 1] != NULL)
{
slot->ttc_buffer = InvalidBuffer;
@ -83,10 +83,11 @@ SeqNext(SeqScan *node)
return (slot);
}
slot->val = estate->es_evTuple[node->scanrelid - 1];
/*
* Note that unlike IndexScan, SeqScan never use keys
* in heap_beginscan (and this is very bad) - so, here
* we have not check are keys ok or not.
* Note that unlike IndexScan, SeqScan never use keys in
* heap_beginscan (and this is very bad) - so, here we have not
* check are keys ok or not.
*/
/* Flag for the next call that no more tuples */
estate->es_evTupleNull[node->scanrelid - 1] = true;
@ -401,10 +402,11 @@ ExecSeqReScan(SeqScan *node, ExprContext *exprCtxt, Plan *parent)
outerPlan = outerPlan((Plan *) node);
ExecReScan(outerPlan, exprCtxt, parent);
}
else /* otherwise, we are scanning a relation */
else
/* otherwise, we are scanning a relation */
{
/* If this is re-scanning of PlanQual ... */
if (estate->es_evTuple != NULL &&
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scanrelid - 1] != NULL)
{
estate->es_evTupleNull[node->scanrelid - 1] = false;


@ -58,15 +58,16 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
ExecReScan(plan, (ExprContext *) NULL, plan);
/*
* For all sublink types except EXPR_SUBLINK, the result type is boolean,
* and we have a fairly clear idea of how to combine multiple subitems
* and deal with NULL values or an empty subplan result.
* For all sublink types except EXPR_SUBLINK, the result type is
* boolean, and we have a fairly clear idea of how to combine multiple
* subitems and deal with NULL values or an empty subplan result.
*
* For EXPR_SUBLINK, the result type is whatever the combining operator
* returns. We have no way to deal with more than one column in the
* subplan result --- hopefully the parser forbids that. More seriously,
* it's unclear what to do with NULL values or an empty subplan result.
* For now, we error out, but should something else happen?
* subplan result --- hopefully the parser forbids that. More
* seriously, it's unclear what to do with NULL values or an empty
* subplan result. For now, we error out, but should something else
* happen?
*/
for (slot = ExecProcNode(plan, plan);
@ -105,14 +106,14 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
}
if (subLinkType != EXPR_SUBLINK)
{
if ((! (bool) result && !(sublink->useor)) ||
if ((!(bool) result && !(sublink->useor)) ||
((bool) result && sublink->useor))
break;
}
i++;
}
if (subLinkType == ALL_SUBLINK && ! (bool) result)
if (subLinkType == ALL_SUBLINK && !(bool) result)
break;
if (subLinkType == ANY_SUBLINK && (bool) result)
break;
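/*
 * Illustrative aside: the per-row results are folded into the sublink's
 * overall answer with the short-circuit rules described above, and an
 * empty subplan defaults to false except for ALL (vacuously true).  The
 * enum and helper names below are invented for the sketch only.
 */
#include <stdbool.h>

typedef enum { SK_EXISTS, SK_ALL, SK_ANY } SublinkKindSketch;

static bool
combine_sublink_sketch(SublinkKindSketch kind, const bool *rows, int nrows)
{
	int			i;

	if (kind == SK_EXISTS)
		return nrows > 0;			/* any row at all satisfies EXISTS */
	if (kind == SK_ALL)
	{
		for (i = 0; i < nrows; i++)
			if (!rows[i])
				return false;		/* one false row sinks ALL */
		return true;				/* vacuously true on an empty subplan */
	}
	for (i = 0; i < nrows; i++)		/* SK_ANY */
		if (rows[i])
			return true;
	return false;					/* empty or all-false subplan */
}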
@ -120,7 +121,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext)
if (!found)
{
/* deal with empty subplan result. Note default result is 'false' */
/* deal with empty subplan result. Note default result is 'false' */
if (subLinkType == ALL_SUBLINK)
result = (Datum) true;
else if (subLinkType == EXPR_SUBLINK)
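
The comment rewrapped above describes how per-row subplan results are combined: ALL_SUBLINK effectively ANDs them and defaults to true for an empty subplan, while ANY_SUBLINK ORs them and defaults to false. Below is a minimal standalone sketch of that combining rule; it is not part of this commit, and its names (LinkType, combine_subplan) are hypothetical, with no backend dependencies.

#include <stdio.h>
#include <stdbool.h>

typedef enum
{
	ALL_LINK,					/* result must hold for every subplan row */
	ANY_LINK					/* result must hold for at least one row */
} LinkType;

static bool
combine_subplan(LinkType type, const bool *rowresult, int nrows)
{
	bool		found = false;
	bool		result = false;
	int			i;

	for (i = 0; i < nrows; i++)
	{
		found = true;
		result = rowresult[i];
		if (type == ALL_LINK && !result)
			break;				/* one false row settles ALL */
		if (type == ANY_LINK && result)
			break;				/* one true row settles ANY */
	}
	if (!found)
		return type == ALL_LINK;	/* empty subplan: ALL => true, ANY => false */
	return result;
}

int
main(void)
{
	bool		rows[] = {true, false, true};

	printf("ALL over {t,f,t}: %d\n", combine_subplan(ALL_LINK, rows, 3));	/* 0 */
	printf("ANY over {t,f,t}: %d\n", combine_subplan(ANY_LINK, rows, 3));	/* 1 */
	printf("ALL over {}:      %d\n", combine_subplan(ALL_LINK, rows, 0));	/* 1 */
	return 0;
}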


@ -3,7 +3,7 @@
* spi.c
* Server Programming Interface
*
* $Id: spi.c,v 1.37 1999/05/13 07:28:30 tgl Exp $
* $Id: spi.c,v 1.38 1999/05/25 16:08:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -19,9 +19,9 @@ static _SPI_connection *_SPI_current = NULL;
static int _SPI_connected = -1;
static int _SPI_curid = -1;
DLLIMPORT uint32 SPI_processed = 0;
DLLIMPORT uint32 SPI_processed = 0;
DLLIMPORT SPITupleTable *SPI_tuptable;
DLLIMPORT int SPI_result;
DLLIMPORT int SPI_result;
static int _SPI_execute(char *src, int tcount, _SPI_plan *plan);
static int _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount);
@ -49,8 +49,8 @@ extern void ShowUsage(void);
int
SPI_connect()
{
char pname[64];
PortalVariableMemory pvmem;
char pname[64];
PortalVariableMemory pvmem;
/*
* It's possible on startup and after commit/abort. In future we'll
@ -345,8 +345,8 @@ SPI_modifytuple(Relation rel, HeapTuple tuple, int natts, int *attnum,
mtuple = heap_formtuple(rel->rd_att, v, n);
infomask = mtuple->t_data->t_infomask;
memmove(&(mtuple->t_data->t_oid), &(tuple->t_data->t_oid),
((char *) &(tuple->t_data->t_hoff) -
(char *) &(tuple->t_data->t_oid)));
((char *) &(tuple->t_data->t_hoff) -
(char *) &(tuple->t_data->t_oid)));
mtuple->t_data->t_infomask = infomask;
mtuple->t_data->t_natts = numberOfAttributes;
}
@ -411,8 +411,8 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
val = heap_getattr(tuple, fnumber, tupdesc, &isnull);
if (isnull)
return NULL;
if (! getTypeOutAndElem((Oid) tupdesc->attrs[fnumber - 1]->atttypid,
&foutoid, &typelem))
if (!getTypeOutAndElem((Oid) tupdesc->attrs[fnumber - 1]->atttypid,
&foutoid, &typelem))
{
SPI_result = SPI_ERROR_NOOUTFUNC;
return NULL;
@ -549,13 +549,13 @@ SPI_pfree(void *pointer)
/* =================== private functions =================== */
/*
* spi_printtup
* spi_printtup
* store tuple retrieved by Executor into SPITupleTable
* of current SPI procedure
*
*/
void
spi_printtup(HeapTuple tuple, TupleDesc tupdesc, DestReceiver* self)
spi_printtup(HeapTuple tuple, TupleDesc tupdesc, DestReceiver * self)
{
SPITupleTable *tuptable;
MemoryContext oldcxt;
@ -633,12 +633,13 @@ _SPI_execute(char *src, int tcount, _SPI_plan *plan)
_SPI_current->qtlist = queryTree_list;
foreach (queryTree_list_item, queryTree_list)
foreach(queryTree_list_item, queryTree_list)
{
queryTree = (Query *) lfirst(queryTree_list_item);
planTree = lfirst(planTree_list);
planTree_list = lnext(planTree_list);
islastquery = (planTree_list == NIL); /* assume lists are same len */
islastquery = (planTree_list == NIL); /* assume lists are same
* len */
if (queryTree->commandType == CMD_UTILITY)
{
@ -658,7 +659,7 @@ _SPI_execute(char *src, int tcount, _SPI_plan *plan)
if (plan == NULL)
{
ProcessUtility(queryTree->utilityStmt, None);
if (! islastquery)
if (!islastquery)
CommandCounterIncrement();
else
return res;
@ -717,17 +718,18 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, char *Nulls, int tcount)
_SPI_current->tuptable = NULL;
_SPI_current->qtlist = NULL;
foreach (queryTree_list_item, queryTree_list)
foreach(queryTree_list_item, queryTree_list)
{
queryTree = (Query *) lfirst(queryTree_list_item);
planTree = lfirst(planTree_list);
planTree_list = lnext(planTree_list);
islastquery = (planTree_list == NIL); /* assume lists are same len */
islastquery = (planTree_list == NIL); /* assume lists are same
* len */
if (queryTree->commandType == CMD_UTILITY)
{
ProcessUtility(queryTree->utilityStmt, None);
if (! islastquery)
if (!islastquery)
CommandCounterIncrement();
else
return SPI_OK_UTILITY;
@ -777,7 +779,7 @@ _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount)
char *intoName = NULL;
int res;
Const tcount_const;
Node *count = NULL;
Node *count = NULL;
switch (operation)
{
@ -833,18 +835,18 @@ _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount)
* ----------------
*/
memset(&tcount_const, 0, sizeof(tcount_const));
tcount_const.type = T_Const;
tcount_const.consttype = INT4OID;
tcount_const.constlen = sizeof(int4);
tcount_const.constvalue = (Datum)tcount;
tcount_const.constisnull = FALSE;
tcount_const.constbyval = TRUE;
tcount_const.constisset = FALSE;
tcount_const.constiscast = FALSE;
count = (Node *)&tcount_const;
tcount_const.type = T_Const;
tcount_const.consttype = INT4OID;
tcount_const.constlen = sizeof(int4);
tcount_const.constvalue = (Datum) tcount;
tcount_const.constisnull = FALSE;
tcount_const.constbyval = TRUE;
tcount_const.constisset = FALSE;
tcount_const.constiscast = FALSE;
count = (Node *) &tcount_const;
}
if (state == NULL) /* plan preparation */
return res;
#ifdef SPI_EXECUTOR_STATS
@ -922,7 +924,7 @@ _SPI_procmem()
}
/*
* _SPI_begin_call
* _SPI_begin_call
*
*/
static int
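
The spi.c hunks above are whitespace-only, but they touch the usual SPI entry points. As a usage sketch of that call sequence, assuming the SPI interface of this era (SPI_connect, SPI_exec, SPI_getvalue, SPI_finish), here is a hypothetical backend function that is not part of this commit:

#include "postgres.h"
#include "executor/spi.h"

/* Count the relation names returned by a sample query. */
int
count_relations(void)
{
	int			total = 0;

	if (SPI_connect() != SPI_OK_CONNECT)
		elog(ERROR, "count_relations: SPI_connect failed");

	if (SPI_exec("SELECT relname FROM pg_class", 0) == SPI_OK_SELECT)
	{
		TupleDesc	tupdesc = SPI_tuptable->tupdesc;
		uint32		i;

		for (i = 0; i < SPI_processed; i++)
		{
			/* SPI_getvalue returns a palloc'd string, or NULL on failure */
			char	   *relname = SPI_getvalue(SPI_tuptable->vals[i], tupdesc, 1);

			if (relname != NULL)
				total++;
		}
	}

	SPI_finish();
	return total;
}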


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.10 1999/02/13 23:15:34 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.11 1999/05/25 16:08:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -20,20 +20,20 @@
*/
/*
* FixedItemIsValid
* FixedItemIsValid
* True iff item is valid.
*/
#define FixedItemIsValid(item) PointerIsValid(item)
/*
* FixedStackGetItemBase
* FixedStackGetItemBase
* Returns base of enclosing structure.
*/
#define FixedStackGetItemBase(stack, item) \
((Pointer)((char *)(item) - (stack)->offset))
/*
* FixedStackGetItem
* FixedStackGetItem
* Returns item of given pointer to enclosing structure.
*/
#define FixedStackGetItem(stack, pointer) \
@ -84,7 +84,7 @@ FixedStackPush(FixedStack stack, Pointer pointer)
#ifdef USE_ASSERT_CHECKING
/*
* FixedStackContains
* FixedStackContains
* True iff ordered stack contains given element.
*
* Note:
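
FixedStackGetItemBase above recovers the enclosing structure from a pointer to an embedded link by subtracting a stored byte offset. The following standalone illustration of that offset trick is not part of this commit; the Element and Link names are hypothetical and there are no backend dependencies.

#include <stdio.h>
#include <stddef.h>

typedef struct Link
{
	struct Link *next;
} Link;

typedef struct Element
{
	int			payload;
	Link		stacklink;		/* embedded link member */
} Element;

/* Recover the enclosing Element from a pointer to its embedded Link. */
static Element *
element_from_link(Link *item, size_t offset)
{
	return (Element *) ((char *) item - offset);
}

int
main(void)
{
	Element		e = {42, {NULL}};
	size_t		offset = offsetof(Element, stacklink);
	Element    *back = element_from_link(&e.stacklink, offset);

	printf("payload = %d\n", back->payload);	/* prints 42 */
	return 0;
}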


@ -8,7 +8,7 @@
*
* Copyright (c) 1994, Regents of the University of California
*
* $Id: stringinfo.c,v 1.15 1999/04/25 03:19:25 tgl Exp $
* $Id: stringinfo.c,v 1.16 1999/05/25 16:08:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -67,17 +67,18 @@ initStringInfo(StringInfo str)
static void
enlargeStringInfo(StringInfo str, int needed)
{
int newlen;
char *newdata;
int newlen;
char *newdata;
needed += str->len + 1; /* total space required now */
if (needed <= str->maxlen)
return; /* got enough space already */
/*
* We don't want to allocate just a little more space with each append;
* for efficiency, double the buffer size each time it overflows.
* Actually, we might need to more than double it if 'needed' is big...
* We don't want to allocate just a little more space with each
* append; for efficiency, double the buffer size each time it
* overflows. Actually, we might need to more than double it if
* 'needed' is big...
*/
newlen = 2 * str->maxlen;
while (needed > newlen)
@ -86,7 +87,7 @@ enlargeStringInfo(StringInfo str, int needed)
newdata = palloc(newlen);
if (newdata == NULL)
elog(ERROR,
"enlargeStringInfo: Out of memory (%d bytes requested)", newlen);
"enlargeStringInfo: Out of memory (%d bytes requested)", newlen);
/* OK, transfer data into new buffer, and release old buffer */
memcpy(newdata, str->data, str->len + 1);
@ -107,11 +108,11 @@ enlargeStringInfo(StringInfo str, int needed)
* generated in a single call (not on the total string length).
*/
void
appendStringInfo(StringInfo str, const char *fmt, ...)
appendStringInfo(StringInfo str, const char *fmt,...)
{
va_list args;
char buffer[1024];
int buflen;
va_list args;
char buffer[1024];
int buflen;
Assert(str != NULL);
@ -164,7 +165,8 @@ appendBinaryStringInfo(StringInfo str, const char *data, int datalen)
memcpy(str->data + str->len, data, datalen);
str->len += datalen;
/* Keep a trailing null in place, even though it's probably useless
/*
* Keep a trailing null in place, even though it's probably useless
* for binary data...
*/
str->data[str->len] = '\0';
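
The rewrapped comment in enlargeStringInfo() explains the growth policy: the buffer doubles each time it overflows, so callers can append freely without sizing anything up front. A short usage sketch of the StringInfo calls shown above (initStringInfo, appendStringInfo, appendBinaryStringInfo) follows; the demo function itself is hypothetical and not part of this commit.

#include "postgres.h"
#include "lib/stringinfo.h"

void
stringinfo_demo(void)
{
	StringInfoData buf;
	int			i;

	initStringInfo(&buf);		/* palloc's a small initial buffer */

	for (i = 0; i < 1000; i++)
		appendStringInfo(&buf, "item %d, ", i); /* buffer doubles as needed */

	appendBinaryStringInfo(&buf, "\0\1\2", 3);	/* binary-safe append */

	elog(NOTICE, "built %d bytes", buf.len);
	pfree(buf.data);			/* the data buffer is palloc'd storage */
}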


@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.35 1999/04/16 04:59:03 tgl Exp $
* $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.36 1999/05/25 16:08:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -75,13 +75,13 @@ static int map_old_to_new(Port *port, UserAuth old, int status);
static int
pg_krb4_recvauth(Port *port)
{
long krbopts = 0; /* one-way authentication */
KTEXT_ST clttkt;
char instance[INST_SZ+1],
version[KRB_SENDAUTH_VLEN+1];
AUTH_DAT auth_data;
Key_schedule key_sched;
int status;
long krbopts = 0; /* one-way authentication */
KTEXT_ST clttkt;
char instance[INST_SZ + 1],
version[KRB_SENDAUTH_VLEN + 1];
AUTH_DAT auth_data;
Key_schedule key_sched;
int status;
strcpy(instance, "*"); /* don't care, but arg gets expanded
* anyway */
@ -99,7 +99,7 @@ pg_krb4_recvauth(Port *port)
if (status != KSUCCESS)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: kerberos error: %s\n", krb_err_txt[status]);
"pg_krb4_recvauth: kerberos error: %s\n", krb_err_txt[status]);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -107,7 +107,7 @@ pg_krb4_recvauth(Port *port)
if (strncmp(version, PG_KRB4_VERSION, KRB_SENDAUTH_VLEN))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: protocol version != \"%s\"\n", PG_KRB4_VERSION);
"pg_krb4_recvauth: protocol version != \"%s\"\n", PG_KRB4_VERSION);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -115,8 +115,8 @@ pg_krb4_recvauth(Port *port)
if (strncmp(port->user, auth_data.pname, SM_USER))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: name \"%s\" != \"%s\"\n",
port->user, auth_data.pname);
"pg_krb4_recvauth: name \"%s\" != \"%s\"\n",
port->user, auth_data.pname);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -129,7 +129,7 @@ static int
pg_krb4_recvauth(Port *port)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb4_recvauth: Kerberos not implemented on this server.\n");
"pg_krb4_recvauth: Kerberos not implemented on this server.\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -223,7 +223,7 @@ pg_krb5_recvauth(Port *port)
if (code = krb5_parse_name(servbuf, &server))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos error %d in krb5_parse_name\n", code);
"pg_krb5_recvauth: Kerberos error %d in krb5_parse_name\n", code);
com_err("pg_krb5_recvauth", code, "in krb5_parse_name");
return STATUS_ERROR;
}
@ -256,7 +256,7 @@ pg_krb5_recvauth(Port *port)
(krb5_authenticator **) NULL))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos error %d in krb5_recvauth\n", code);
"pg_krb5_recvauth: Kerberos error %d in krb5_recvauth\n", code);
com_err("pg_krb5_recvauth", code, "in krb5_recvauth");
krb5_free_principal(server);
return STATUS_ERROR;
@ -271,7 +271,7 @@ pg_krb5_recvauth(Port *port)
if ((code = krb5_unparse_name(client, &kusername)))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos error %d in krb5_unparse_name\n", code);
"pg_krb5_recvauth: Kerberos error %d in krb5_unparse_name\n", code);
com_err("pg_krb5_recvauth", code, "in krb5_unparse_name");
krb5_free_principal(client);
return STATUS_ERROR;
@ -280,7 +280,7 @@ pg_krb5_recvauth(Port *port)
if (!kusername)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: could not decode username\n");
"pg_krb5_recvauth: could not decode username\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
@ -289,7 +289,7 @@ pg_krb5_recvauth(Port *port)
if (strncmp(username, kusername, SM_USER))
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: name \"%s\" != \"%s\"\n", port->user, kusername);
"pg_krb5_recvauth: name \"%s\" != \"%s\"\n", port->user, kusername);
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
pfree(kusername);
@ -304,7 +304,7 @@ static int
pg_krb5_recvauth(Port *port)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_krb5_recvauth: Kerberos not implemented on this server.\n");
"pg_krb5_recvauth: Kerberos not implemented on this server.\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -359,7 +359,7 @@ pg_passwordv0_recvauth(void *arg, PacketLen len, void *pkt)
if (user == NULL || password == NULL)
{
snprintf(PQerrormsg, ERROR_MSG_LENGTH,
"pg_password_recvauth: badly formed password packet.\n");
"pg_password_recvauth: badly formed password packet.\n");
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@ -405,7 +405,7 @@ pg_passwordv0_recvauth(void *arg, PacketLen len, void *pkt)
void
auth_failed(Port *port)
{
char buffer[512];
char buffer[512];
const char *authmethod = "Unknown auth method:";
switch (port->auth_method)
@ -449,9 +449,9 @@ be_recvauth(Port *port)
/*
* Get the authentication method to use for this frontend/database
* combination. Note: a failure return indicates a problem with
* the hba config file, not with the request. hba.c should have
* dropped an error message into the postmaster logfile if it failed.
* combination. Note: a failure return indicates a problem with the
* hba config file, not with the request. hba.c should have dropped
* an error message into the postmaster logfile if it failed.
*/
if (hba_getauthmethod(&port->raddr, port->user, port->database,
@ -470,27 +470,28 @@ be_recvauth(Port *port)
{
/* Handle new style authentication. */
AuthRequest areq = AUTH_REQ_OK;
PacketDoneProc auth_handler = NULL;
AuthRequest areq = AUTH_REQ_OK;
PacketDoneProc auth_handler = NULL;
switch (port->auth_method)
{
case uaReject:
/*
* This could have come from an explicit "reject" entry
* in pg_hba.conf, but more likely it means there was no
* matching entry. Take pity on the poor user and issue
* a helpful error message. NOTE: this is not a security
* breach, because all the info reported here is known
* at the frontend and must be assumed known to bad guys.
* This could have come from an explicit "reject" entry in
* pg_hba.conf, but more likely it means there was no
* matching entry. Take pity on the poor user and issue a
* helpful error message. NOTE: this is not a security
* breach, because all the info reported here is known at
* the frontend and must be assumed known to bad guys.
* We're merely helping out the less clueful good guys.
* NOTE 2: libpq-be.h defines the maximum error message
* length as 99 characters. It probably wouldn't hurt
* anything to increase it, but there might be some
* client out there that will fail. So, be terse.
* anything to increase it, but there might be some client
* out there that will fail. So, be terse.
*/
{
char buffer[512];
char buffer[512];
const char *hostinfo = "localhost";
if (port->raddr.sa.sa_family == AF_INET)
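
The auth.c hunks above repeatedly reflow the same failure-reporting pattern: format a message into a fixed-size buffer, write it to stderr, and hand the same string to the debug log. Here is a minimal standalone sketch of that pattern, not part of this commit, using hypothetical stand-ins (AuthMethod, log_debug) instead of the backend facilities.

#include <stdio.h>

#define ERRMSG_LEN 512

typedef enum
{
	AUTH_REJECT,
	AUTH_KRB4,
	AUTH_KRB5,
	AUTH_PASSWORD
} AuthMethod;

static void
log_debug(const char *msg)
{
	fprintf(stderr, "DEBUG: %s", msg);	/* stand-in for the debug log */
}

static void
report_auth_failure(AuthMethod method, const char *user)
{
	char		errmsg[ERRMSG_LEN];
	const char *name = "Unknown auth method:";

	switch (method)
	{
		case AUTH_REJECT:
			name = "rejected by configuration:";
			break;
		case AUTH_KRB4:
			name = "Kerberos v4:";
			break;
		case AUTH_KRB5:
			name = "Kerberos v5:";
			break;
		case AUTH_PASSWORD:
			name = "password:";
			break;
	}
	snprintf(errmsg, sizeof(errmsg),
			 "authentication failed for user \"%s\": %s\n", user, name);
	fputs(errmsg, stderr);
	log_debug(errmsg);
}

int
main(void)
{
	report_auth_failure(AUTH_KRB5, "postgres");
	return 0;
}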
