Fix misspelling.

Bruce Momjian 1999-07-07 09:11:15 +00:00
parent ab1b88fd0d
commit 1391098851
3 changed files with 26 additions and 16 deletions

View File

@ -114,7 +114,7 @@ COMMANDS
* Allow CLUSTER on all tables at once, and improve CLUSTER
* Generate error on CREATE OPERATOR of ~~, ~ and ~*
* Add SIMILAR TO to allow character classes, 'pg_[a-c]%'
* Auto-destroy sequence on DROP of table with SERIAL
* Auto-destroy sequence on DROP of table with SERIAL(Ryan)
* Allow LOCK TABLE tab1, tab2, tab3 so all tables locked in unison
* Allow INSERT/UPDATE of system-generated oid value for a row
* Allow ESCAPE '\' at the end of LIKE for ANSI compliance
@ -135,7 +135,9 @@ CLIENTS
MISC
* Increase identifier length(NAMEDATALEN) if small performance hit
* Allow row re-use without vacuum, maybe?(Vadim)
* Allow row re-use without vacuum(Vadim)
* Create a background process for each database that runs while
database is idle, finding superseded rows, gathering stats and vacuuming
* Add UNIQUE capability to non-btree indexes
* Certain indexes will not shrink, i.e. oid indexes with many inserts
* Restore unused oids on backend exit if no one else has gotten oids
@ -143,7 +145,8 @@ MISC
* Allow WHERE restriction on ctid
* Allow cursors to be DECLAREd/OPENed/CLOSEed outside transactions
* Allow PQrequestCancel() to terminate when in waiting-for-lock state
* Transaction log, so re-do log can be on a separate disk
* Transaction log, so re-do log can be on a separate disk by
logging SQL queries, or before/after row images
* Populate backend status area and write program to dump status data
* Make oid use unsigned int more reliably, pg_atoi()
* Allow subqueries in target list
@ -177,6 +180,7 @@ INDEXES
* Allow LIMIT ability on single-table queries that have no ORDER BY to use
a matching index
* Improve LIMIT processing by using index to limit rows processed
* Have optimizer take LIMIT into account when considering index scans
CACHE
@ -199,6 +203,11 @@ MISC
* Create more system table indexes for faster cache lookups
* Improve Subplan list handling
* Allow Subplans to use efficient joins(hash, merge) with upper variable
* use fmgr_info()/fmgr_faddr() instead of fmgr() calls in high-traffic
places, like GROUP BY, UNIQUE, index processing, etc.
* improve dynamic memory allocation by introducing tuple-context memory
allocation
* add pooled memory allocation where allocations are freed only as a group
SOURCE CODE
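
The last two MISC items added above call for tuple-context and pooled memory allocation, i.e. allocations that are released as a group rather than freed one by one. As a rough standalone sketch of that idea only (illustrative names and a fixed block size; this is not the backend's allocator):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* One fixed-size chunk of a pool; chunks are chained together. */
    typedef struct PoolBlock
    {
        struct PoolBlock *next;
        size_t      used;
        char        data[8192];
    } PoolBlock;

    /* A pool hands out pieces of its chunks and frees everything at once. */
    typedef struct Pool
    {
        PoolBlock  *head;
    } Pool;

    static void *
    pool_alloc(Pool *pool, size_t size)
    {
        PoolBlock  *blk = pool->head;

        /* start a new chunk if there is none, or the current one is full */
        if (blk == NULL || blk->used + size > sizeof(blk->data))
        {
            blk = malloc(sizeof(PoolBlock));
            if (blk == NULL)
                return NULL;
            blk->next = pool->head;
            blk->used = 0;
            pool->head = blk;
        }
        blk->used += size;
        return blk->data + blk->used - size;
    }

    /* release every allocation made from the pool in one pass */
    static void
    pool_reset(Pool *pool)
    {
        while (pool->head != NULL)
        {
            PoolBlock  *next = pool->head->next;

            free(pool->head);
            pool->head = next;
        }
    }

    int
    main(void)
    {
        Pool        per_tuple = {NULL};
        char       *s = pool_alloc(&per_tuple, 64);

        if (s != NULL)
        {
            strcpy(s, "per-tuple scratch space");
            printf("%s\n", s);
        }
        pool_reset(&per_tuple);     /* one call frees it all */
        return 0;
    }

A per-tuple or per-query pool like this makes cleanup a single pass over a few large blocks, with no per-allocation free() bookkeeping in hot code paths, which is what the two items are asking for.
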
@ -227,6 +236,7 @@ Developers who have claimed items are:
* Michael is Michael Meskes <meskes@postgresql.org>
* Oleg is Oleg Bartunov <oleg@sai.msu.su>
* Peter is Peter T Mount <peter@retep.org.uk>
* Ryan is Ryan Bradetich <rbrad@hpb50023.boi.hp.com>
* Stefan Simkovics <ssimkovi@rainbow.studorg.tuwien.ac.at>
* Tatsuo is Tatsuo Ishii <t-ishii@sra.co.jp>
* Tom is Tom Lane <tgl@sss.pgh.pa.us>

View File

@ -2,7 +2,7 @@
* Routines for handling of 'SET var TO',
* 'SHOW var' and 'RESET var' statements.
*
* $Id: variable.c,v 1.21 1999/06/17 15:15:48 momjian Exp $
* $Id: variable.c,v 1.22 1999/07/07 09:11:13 momjian Exp $
*
*/
@ -42,7 +42,7 @@ static bool show_XactIsoLevel(void);
static bool reset_XactIsoLevel(void);
static bool parse_XactIsoLevel(const char *);
extern Cost _cpu_page_wight_;
extern Cost _cpu_page_weight_;
extern Cost _cpu_index_page_wight_;
extern bool _use_geqo_;
extern int32 _use_geqo_rels_;
@ -246,7 +246,7 @@ parse_cost_heap(const char *value)
}
res = float4in((char *) value);
_cpu_page_wight_ = *res;
_cpu_page_weight_ = *res;
return TRUE;
}
@ -255,14 +255,14 @@ static bool
show_cost_heap()
{
elog(NOTICE, "COST_HEAP is %f", _cpu_page_wight_);
elog(NOTICE, "COST_HEAP is %f", _cpu_page_weight_);
return TRUE;
}
static bool
reset_cost_heap()
{
_cpu_page_wight_ = _CPU_PAGE_WEIGHT_;
_cpu_page_weight_ = _CPU_PAGE_WEIGHT_;
return TRUE;
}
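
For context, COST_HEAP is one of the run-time variables this file wires up: each variable gets a parse/show/reset handler trio, and the COST_HEAP handlers read and write the optimizer global _cpu_page_weight_ whose spelling is being fixed here. A stripped-down, standalone imitation of that pattern (plain libc calls standing in for float4in() and elog(); the default below is made up, not the backend's _CPU_PAGE_WEIGHT_):

    #include <stdio.h>
    #include <stdlib.h>

    #define DEFAULT_PAGE_WEIGHT 0.05    /* made-up default, not _CPU_PAGE_WEIGHT_ */

    static double cpu_page_weight = DEFAULT_PAGE_WEIGHT;   /* stands in for _cpu_page_weight_ */

    /* handler for SET COST_HEAP TO 'value' */
    static int
    parse_cost_heap(const char *value)
    {
        if (value == NULL)
            return 0;
        cpu_page_weight = atof(value);
        return 1;
    }

    /* handler for SHOW COST_HEAP */
    static int
    show_cost_heap(void)
    {
        printf("COST_HEAP is %f\n", cpu_page_weight);
        return 1;
    }

    /* handler for RESET COST_HEAP */
    static int
    reset_cost_heap(void)
    {
        cpu_page_weight = DEFAULT_PAGE_WEIGHT;
        return 1;
    }

    int
    main(void)
    {
        show_cost_heap();           /* compiled-in default */
        parse_cost_heap("0.2");     /* SET COST_HEAP TO '0.2' */
        show_cost_heap();
        reset_cost_heap();          /* RESET COST_HEAP */
        show_cost_heap();
        return 0;
    }

Running the sketch prints the default, then 0.2 after the simulated SET, then the default again after RESET, mirroring what SET/SHOW/RESET COST_HEAP do through the real handlers shown in the hunks above.
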

View File

@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.38 1999/05/25 22:41:27 momjian Exp $
* $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.39 1999/07/07 09:11:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@ -51,7 +51,7 @@ bool _enable_nestloop_ = true;
bool _enable_mergejoin_ = true;
bool _enable_hashjoin_ = true;
Cost _cpu_page_wight_ = _CPU_PAGE_WEIGHT_;
Cost _cpu_page_weight_ = _CPU_PAGE_WEIGHT_;
Cost _cpu_index_page_wight_ = _CPU_INDEX_PAGE_WEIGHT_;
/*
@ -93,7 +93,7 @@ cost_seqscan(int relid, int relpages, int reltuples)
else
{
temp += relpages;
temp += _cpu_page_wight_ * reltuples;
temp += _cpu_page_weight_ * reltuples;
}
Assert(temp >= 0);
return temp;
@ -159,7 +159,7 @@ cost_index(Oid indexid,
temp += _cpu_index_page_wight_ * selec * indextuples;
/* per heap tuples */
temp += _cpu_page_wight_ * selec * reltuples;
temp += _cpu_page_weight_ * selec * reltuples;
Assert(temp >= 0);
return temp;
@ -213,7 +213,7 @@ cost_sort(List *pathkeys, int tuples, int width)
* could be base_log(tuples, NBuffers), but we are only doing 2-way
* merges
*/
temp += _cpu_page_wight_ * tuples * base_log((double) tuples, 2.0);
temp += _cpu_page_weight_ * tuples * base_log((double) tuples, 2.0);
Assert(temp > 0);
@ -236,7 +236,7 @@ cost_result(int tuples, int width)
Cost temp = 0;
temp = temp + page_size(tuples, width);
temp = temp + _cpu_page_wight_ * tuples;
temp = temp + _cpu_page_weight_ * tuples;
Assert(temp >= 0);
return temp;
}
@ -310,7 +310,7 @@ cost_mergejoin(Cost outercost,
temp += cost_sort(outersortkeys, outersize, outerwidth);
if (innersortkeys) /* do we need to sort? */
temp += cost_sort(innersortkeys, innersize, innerwidth);
temp += _cpu_page_wight_ * (outersize + innersize);
temp += _cpu_page_weight_ * (outersize + innersize);
Assert(temp >= 0);
@ -361,7 +361,7 @@ cost_hashjoin(Cost outercost,
temp += outercost + innercost;
/* cost of computing hash function: must do it once per tuple */
temp += _cpu_page_wight_ * (outersize + innersize);
temp += _cpu_page_weight_ * (outersize + innersize);
/* cost of main-memory hashtable */
temp += (innerpages < NBuffers) ? innerpages : NBuffers;
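
Every hunk in this file replaces the same misspelled parameter: _cpu_page_weight_ is the per-tuple CPU weight these estimators add on top of raw page counts. A small, self-contained sketch of how the sequential-scan and sort formulas above combine pages, tuples and that weight (the constant and function names here are illustrative stand-ins, not the values from the backend's headers):

    #include <stdio.h>
    #include <math.h>

    /* Illustrative stand-in for _cpu_page_weight_; not the backend's default. */
    static const double cpu_page_weight = 0.033;

    /* Mirrors the shape of cost_seqscan() above: one unit per page read,
     * plus the CPU weight once per tuple examined. */
    static double
    est_seqscan(int relpages, int reltuples)
    {
        return relpages + cpu_page_weight * reltuples;
    }

    /* Mirrors the CPU term of cost_sort() above: 2-way merges cost roughly
     * tuples * log2(tuples) comparisons, each weighted by cpu_page_weight. */
    static double
    est_sort_cpu(int tuples)
    {
        return cpu_page_weight * tuples * (log((double) tuples) / log(2.0));
    }

    int
    main(void)
    {
        /* e.g. a table of 1,000 pages holding 100,000 tuples */
        printf("seqscan estimate: %.1f\n", est_seqscan(1000, 100000));
        printf("sort CPU estimate: %.1f\n", est_sort_cpu(100000));
        return 0;
    }

With a weight of 0.033, the example table of 1,000 pages and 100,000 tuples costs about 1,000 units of page I/O plus 3,300 units of per-tuple CPU, and the 2-way merge sort adds roughly 0.033 * 100,000 * log2(100,000), about 55,000 units, matching the tuples * log2(tuples) comparison count noted in the cost_sort() comment above.
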