Fix whitespace issues found by git diff --check, add gitattributes

Set per file type attributes in .gitattributes to fine-tune whitespace
checks.  With the associated cleanups, the tree is now clean for git
Peter Eisentraut 2013-11-10 09:20:52 -05:00
parent dca09ac533
commit 001e114b8d
78 changed files with 303 additions and 288 deletions

.gitattributes (new file, 34 lines)

@ -0,0 +1,34 @@
* whitespace=space-before-tab,trailing-space
*.[chly] whitespace=space-before-tab,trailing-space,indent-with-non-tab,tabwidth=4
*.dsl whitespace=space-before-tab,trailing-space,tab-in-indent
*.patch -whitespace
*.pl whitespace=space-before-tab,trailing-space,tabwidth=4
*.po whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eof
*.sgml whitespace=space-before-tab,trailing-space,tab-in-indent,-blank-at-eol
*.x[ms]l whitespace=space-before-tab,trailing-space,tab-in-indent
# Avoid confusing ASCII underlines with leftover merge conflict markers
README conflict-marker-size=32
README.* conflict-marker-size=32
# Certain data files that contain special whitespace, and other special cases
**/data/*.data -whitespace
contrib/tsearch2/sql/tsearch2.sql whitespace=space-before-tab,blank-at-eof,-blank-at-eol
doc/bug.template whitespace=space-before-tab,-blank-at-eof,blank-at-eol
src/backend/catalog/sql_features.txt whitespace=space-before-tab,blank-at-eof,-blank-at-eol
src/backend/tsearch/hunspell_sample.affix whitespace=-blank-at-eof
# Test output files that contain extra whitespace
**/expected/*.out -whitespace
**/output/*.source -whitespace
src/interfaces/ecpg/test/expected/* -whitespace
src/interfaces/libpq/test/expected.out whitespace=-blank-at-eof
# These files are maintained or generated elsewhere. We take them as is.
configure -whitespace
ppport.h -whitespace
src/backend/regex/COPYRIGHT -whitespace
src/backend/regex/re_syntax.n -whitespace
src/backend/snowball/libstemmer/*.c -whitespace
src/backend/utils/mb/Unicode/*-std.txt -whitespace
src/include/snowball/libstemmer/* -whitespace


@ -919,7 +919,7 @@ fi
if test "$with_gssapi" = yes ; then
if test "$PORTNAME" != "win32"; then
AC_SEARCH_LIBS(gss_init_sec_context, [gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'], [],
[AC_MSG_ERROR([could not find function 'gss_init_sec_context' required for GSSAPI])])
[AC_MSG_ERROR([could not find function 'gss_init_sec_context' required for GSSAPI])])
else
LIBS="$LIBS -lgssapi32"
fi


@ -5,7 +5,7 @@
-- A version of 1.1 was shipped with these objects mistakenly in 9.3.0.
-- Therefore we only add them if we detect that they aren't already there and
-- Therefore we only add them if we detect that they aren't already there and
-- dependent on the extension.
DO LANGUAGE plpgsql
@ -26,7 +26,7 @@ BEGIN
AND x.extname = 'hstore';
IF NOT FOUND
THEN
THEN
CREATE FUNCTION hstore_to_json(hstore)
RETURNS json


@ -341,4 +341,3 @@ insert into test_json_agg values ('rec1','"a key" =>1, b => t, c => null, d=> 12
('rec2','"a key" =>2, b => f, c => "null", d=> -12345, e => 012345.6, f=> -1.234, g=> 0.345e-4');
select json_agg(q) from test_json_agg q;
select json_agg(q) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;


@ -80,7 +80,7 @@ else
$outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
}
my $sql =
"select $outf from "
"select $outf from "
. join(', ', keys %table)
. " where "
. join(' AND ', @where) . ';';


@ -368,8 +368,7 @@ usage(void)
" -j, --jobs=NUM number of threads (default: 1)\n"
" -l, --log write transaction times to log file\n"
" -M, --protocol=simple|extended|prepared\n"
" protocol for submitting queries "
"(default: simple)\n"
" protocol for submitting queries (default: simple)\n"
" -n, --no-vacuum do not run VACUUM before tests\n"
" -N, --skip-some-updates skip updates of pgbench_tellers and pgbench_branches\n"
" -P, --progress=NUM show thread progress report every NUM seconds\n"
@ -377,8 +376,7 @@ usage(void)
" -R, --rate=NUM target rate in transactions per second\n"
" -s, --scale=NUM report this scale factor in output\n"
" -S, --select-only perform SELECT-only transactions\n"
" -t, --transactions number of transactions each client runs "
"(default: 10)\n"
" -t, --transactions number of transactions each client runs (default: 10)\n"
" -T, --time=NUM duration of benchmark test in seconds\n"
" -v, --vacuum-all vacuum all four standard tables before tests\n"
" --aggregate-interval=NUM aggregate data over NUM seconds\n"
@ -933,7 +931,7 @@ top:
* Use inverse transform sampling to randomly generate a delay, such
* that the series of delays will approximate a Poisson distribution
* centered on the throttle_delay time.
*
*
* 10000 implies a 9.2 (-log(1/10000)) to 0.0 (log 1) delay multiplier,
* and results in a 0.055 % target underestimation bias:
*
@ -1211,9 +1209,9 @@ top:
}
/*
* This ensures that a throttling delay is inserted before proceeding
* with sql commands, after the first transaction. The first transaction
* throttling is performed when first entering doCustom.
* This ensures that a throttling delay is inserted before proceeding
* with sql commands, after the first transaction. The first transaction
* throttling is performed when first entering doCustom.
*/
if (trans_needs_throttle) {
trans_needs_throttle = false;
@ -2180,7 +2178,7 @@ printResults(int ttype, int normal_xacts, int nclients,
latency, 0.001 * sqrt(sqlat - 1000000.0 * latency * latency));
}
else
{
{
/* only an average latency computed from the duration is available */
printf("latency average: %.3f ms\n",
1000.0 * duration * nclients / normal_xacts);
@ -2569,7 +2567,7 @@ main(int argc, char **argv)
}
}
/* compute a per thread delay */
/* compute a per thread delay */
throttle_delay *= nthreads;
if (argc > optind)
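
One of the pgbench.c hunks above reindents the comment explaining how --rate throttling schedules transactions: each delay is drawn by inverse transform sampling so that the series of delays approximates a Poisson process centered on throttle_delay, and truncating the uniform draw at 1/10000 caps the multiplier at about 9.2 (-log(1/10000)) with the stated 0.055% underestimation bias. The following is only a hedged sketch of that sampling idea, not pgbench's actual code; the helper name and the use of rand() are illustrative assumptions.

    #include <math.h>
    #include <stdlib.h>

    /* Illustrative only: draw an exponential inter-arrival delay with mean
     * throttle_delay.  Restricting the uniform variate to [1/10000, 1]
     * caps the multiplier at -log(1/10000) ~= 9.2 and slightly
     * underestimates the mean, as the comment quoted above describes. */
    static double
    poisson_delay(double throttle_delay)
    {
        int     r = 1 + rand() % 10000;     /* uniform integer in [1, 10000] */
        double  u = (double) r / 10000.0;   /* uniform in (0, 1] */

        return -log(u) * throttle_delay;
    }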


@ -11,7 +11,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key');
-- check whether the defaults are ok
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
'key', 'expect-cipher-algo=aes128,
'key', 'expect-cipher-algo=aes128,
expect-disable-mdc=0,
expect-sess-key=0,
expect-s2k-mode=3,
@ -25,7 +25,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
-- maybe the expect- stuff simply does not work
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
'key', 'expect-cipher-algo=bf,
'key', 'expect-cipher-algo=bf,
expect-disable-mdc=1,
expect-sess-key=1,
expect-s2k-mode=0,
@ -56,7 +56,7 @@ select pgp_sym_decrypt_bytea(pgp_sym_encrypt('Text', 'baz'), 'baz');
-- algorithm change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=bf'),
'key', 'expect-cipher-algo=bf');
'key', 'expect-cipher-algo=bf');
pgp_sym_decrypt
-----------------
Secret.
@ -64,7 +64,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes'),
'key', 'expect-cipher-algo=aes128');
'key', 'expect-cipher-algo=aes128');
pgp_sym_decrypt
-----------------
Secret.
@ -72,7 +72,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes192'),
'key', 'expect-cipher-algo=aes192');
'key', 'expect-cipher-algo=aes192');
pgp_sym_decrypt
-----------------
Secret.
@ -81,7 +81,7 @@ select pgp_sym_decrypt(
-- s2k change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=0'),
'key', 'expect-s2k-mode=0');
'key', 'expect-s2k-mode=0');
pgp_sym_decrypt
-----------------
Secret.
@ -89,7 +89,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=1'),
'key', 'expect-s2k-mode=1');
'key', 'expect-s2k-mode=1');
pgp_sym_decrypt
-----------------
Secret.
@ -97,7 +97,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=3'),
'key', 'expect-s2k-mode=3');
'key', 'expect-s2k-mode=3');
pgp_sym_decrypt
-----------------
Secret.
@ -106,7 +106,7 @@ select pgp_sym_decrypt(
-- s2k digest change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=md5'),
'key', 'expect-s2k-digest-algo=md5');
'key', 'expect-s2k-digest-algo=md5');
pgp_sym_decrypt
-----------------
Secret.
@ -114,7 +114,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=sha1'),
'key', 'expect-s2k-digest-algo=sha1');
'key', 'expect-s2k-digest-algo=sha1');
pgp_sym_decrypt
-----------------
Secret.
@ -123,7 +123,7 @@ select pgp_sym_decrypt(
-- sess key
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=0'),
'key', 'expect-sess-key=0');
'key', 'expect-sess-key=0');
pgp_sym_decrypt
-----------------
Secret.
@ -131,7 +131,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1'),
'key', 'expect-sess-key=1');
'key', 'expect-sess-key=1');
pgp_sym_decrypt
-----------------
Secret.
@ -139,7 +139,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=bf'),
'key', 'expect-sess-key=1, expect-cipher-algo=bf');
'key', 'expect-sess-key=1, expect-cipher-algo=bf');
pgp_sym_decrypt
-----------------
Secret.
@ -147,7 +147,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes192'),
'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
pgp_sym_decrypt
-----------------
Secret.
@ -155,7 +155,7 @@ select pgp_sym_decrypt(
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes256'),
'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
pgp_sym_decrypt
-----------------
Secret.
@ -164,7 +164,7 @@ select pgp_sym_decrypt(
-- no mdc
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'disable-mdc=1'),
'key', 'expect-disable-mdc=1');
'key', 'expect-disable-mdc=1');
pgp_sym_decrypt
-----------------
Secret.
@ -173,7 +173,7 @@ select pgp_sym_decrypt(
-- crlf
select encode(pgp_sym_decrypt_bytea(
pgp_sym_encrypt(E'1\n2\n3\r\n', 'key', 'convert-crlf=1'),
'key'), 'hex');
'key'), 'hex');
encode
----------------------
310d0a320d0a330d0d0a
@ -182,7 +182,7 @@ select encode(pgp_sym_decrypt_bytea(
-- conversion should be lossless
select encode(digest(pgp_sym_decrypt(
pgp_sym_encrypt(E'\r\n0\n1\r\r\n\n2\r', 'key', 'convert-crlf=1'),
'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
encode(digest(E'\r\n0\n1\r\r\n\n2\r', 'sha1'), 'hex') as expect;
result | expect
------------------------------------------+------------------------------------------


@ -8,7 +8,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'), 'key');
-- check whether the defaults are ok
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
'key', 'expect-cipher-algo=aes128,
'key', 'expect-cipher-algo=aes128,
expect-disable-mdc=0,
expect-sess-key=0,
expect-s2k-mode=3,
@ -18,7 +18,7 @@ select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
-- maybe the expect- stuff simply does not work
select pgp_sym_decrypt(pgp_sym_encrypt('Secret.', 'key'),
'key', 'expect-cipher-algo=bf,
'key', 'expect-cipher-algo=bf,
expect-disable-mdc=1,
expect-sess-key=1,
expect-s2k-mode=0,
@ -36,62 +36,62 @@ select pgp_sym_decrypt_bytea(pgp_sym_encrypt('Text', 'baz'), 'baz');
-- algorithm change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=bf'),
'key', 'expect-cipher-algo=bf');
'key', 'expect-cipher-algo=bf');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes'),
'key', 'expect-cipher-algo=aes128');
'key', 'expect-cipher-algo=aes128');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'cipher-algo=aes192'),
'key', 'expect-cipher-algo=aes192');
'key', 'expect-cipher-algo=aes192');
-- s2k change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=0'),
'key', 'expect-s2k-mode=0');
'key', 'expect-s2k-mode=0');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=1'),
'key', 'expect-s2k-mode=1');
'key', 'expect-s2k-mode=1');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-mode=3'),
'key', 'expect-s2k-mode=3');
'key', 'expect-s2k-mode=3');
-- s2k digest change
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=md5'),
'key', 'expect-s2k-digest-algo=md5');
'key', 'expect-s2k-digest-algo=md5');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 's2k-digest-algo=sha1'),
'key', 'expect-s2k-digest-algo=sha1');
'key', 'expect-s2k-digest-algo=sha1');
-- sess key
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=0'),
'key', 'expect-sess-key=0');
'key', 'expect-sess-key=0');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1'),
'key', 'expect-sess-key=1');
'key', 'expect-sess-key=1');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=bf'),
'key', 'expect-sess-key=1, expect-cipher-algo=bf');
'key', 'expect-sess-key=1, expect-cipher-algo=bf');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes192'),
'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
'key', 'expect-sess-key=1, expect-cipher-algo=aes192');
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'sess-key=1, cipher-algo=aes256'),
'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
'key', 'expect-sess-key=1, expect-cipher-algo=aes256');
-- no mdc
select pgp_sym_decrypt(
pgp_sym_encrypt('Secret.', 'key', 'disable-mdc=1'),
'key', 'expect-disable-mdc=1');
'key', 'expect-disable-mdc=1');
-- crlf
select encode(pgp_sym_decrypt_bytea(
pgp_sym_encrypt(E'1\n2\n3\r\n', 'key', 'convert-crlf=1'),
'key'), 'hex');
'key'), 'hex');
-- conversion should be lossless
select encode(digest(pgp_sym_decrypt(
pgp_sym_encrypt(E'\r\n0\n1\r\r\n\n2\r', 'key', 'convert-crlf=1'),
'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
'key', 'convert-crlf=1'), 'sha1'), 'hex') as result,
encode(digest(E'\r\n0\n1\r\r\n\n2\r', 'sha1'), 'hex') as expect;


@ -793,9 +793,9 @@ CREATE EVENT TRIGGER noddl ON ddl_command_start
<screen>
=# \dy
List of event triggers
Name | Event | Owner | Enabled | Procedure | Tags
Name | Event | Owner | Enabled | Procedure | Tags
-------+-------------------+-------+---------+-----------+------
noddl | ddl_command_start | dim | enabled | noddl |
noddl | ddl_command_start | dim | enabled | noddl |
(1 row)
=# CREATE TABLE foo(id serial);


@ -13469,7 +13469,7 @@ SELECT * FROM unnest2(ARRAY[[1,2],[3,4]]);
<programlisting>
-- set returning function WITH ORDINALITY
SELECT * FROM pg_ls_dir('.') WITH ORDINALITY AS t(ls,n);
ls | n
ls | n
-----------------+----
pg_serial | 1
pg_twophase | 2


@ -439,7 +439,7 @@ pgbench <optional> <replaceable>options</> </optional> <replaceable>dbname</>
transactions go past their original scheduled end time, it is
possible for later ones to catch up again.
</para>
<para>
<para>
When throttling is active, the average and maximum transaction
schedule lag time are reported in ms. This is the delay between
the original scheduled transaction time and the actual transaction


@ -376,7 +376,7 @@ TABLE [ ONLY ] <replaceable class="parameter">table_name</replaceable> [ * ]
For example:
<programlisting>
SELECT * FROM unnest(ARRAY['a','b','c','d','e','f']) WITH ORDINALITY;
unnest | ordinality
unnest | ordinality
--------+----------
a | 1
b | 2


@ -148,7 +148,7 @@ ereport(ERROR,
function and does not return to the caller. If the severity level is
lower than <literal>ERROR</>, <function>ereport</> returns normally.
</para>
<para>
The available auxiliary routines for <function>ereport</> are:
<itemizedlist>
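
The documentation hunk above is describing ereport(): it does not return when the severity is ERROR or higher, and the report is assembled from auxiliary routines. As a generic illustration of that calling convention (not text or code from this commit; the constraint name and key value are invented):

    ereport(ERROR,
            (errcode(ERRCODE_UNIQUE_VIOLATION),
             errmsg("duplicate key value violates unique constraint \"%s\"",
                    "example_constraint"),
             errdetail("Key (id)=(42) already exists."),
             errhint("Choose a different id value.")));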


@ -1620,7 +1620,7 @@ SELECT
count(*) AS unfiltered,
count(*) FILTER (WHERE i < 5) AS filtered
FROM generate_series(1,10) AS s(i);
unfiltered | filtered
unfiltered | filtered
------------+----------
10 | 4
(1 row)


@ -3861,7 +3861,7 @@ pg_language_aclmask(Oid lang_oid, Oid roleid,
* relative to the same snapshot that will be used to read the underlying
* data. The caller will actually pass NULL for an instantaneous MVCC
* snapshot, since all we do with the snapshot argument is pass it through
* to systable_beginscan().
* to systable_beginscan().
*/
AclMode
pg_largeobject_aclmask_snapshot(Oid lobj_oid, Oid roleid,


@ -2693,7 +2693,7 @@ getObjectIdentity(const ObjectAddress *object)
break;
case OCLASS_PROC:
appendStringInfoString(&buffer,
appendStringInfoString(&buffer,
format_procedure_qualified(object->objectId));
break;
@ -3288,7 +3288,7 @@ getObjectIdentity(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for event trigger %u",
object->objectId);
trigForm = (Form_pg_event_trigger) GETSTRUCT(tup);
appendStringInfoString(&buffer,
appendStringInfoString(&buffer,
quote_identifier(NameStr(trigForm->evtname)));
ReleaseSysCache(tup);
break;


@ -8815,7 +8815,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
* actually auto-updatable or not.
*/
if (check_option)
{
{
const char *view_updatable_error =
view_query_is_auto_updatable(view_query,
security_barrier, true);
@ -10044,10 +10044,10 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
pg_class_tuple = SearchSysCacheCopy1(RELOID,
ObjectIdGetDatum(RelationGetRelid(rel)));
ObjectIdGetDatum(RelationGetRelid(rel)));
if (!HeapTupleIsValid(pg_class_tuple))
elog(ERROR, "cache lookup failed for relation \"%s\"",
RelationGetRelationName(rel));
RelationGetRelationName(rel));
pg_class_form = (Form_pg_class) GETSTRUCT(pg_class_tuple);
if (pg_class_form->relreplident != ri_type)
{
@ -10115,7 +10115,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid,
simple_heap_update(pg_index, &pg_index_tuple->t_self, pg_index_tuple);
CatalogUpdateIndexes(pg_index, pg_index_tuple);
InvokeObjectPostAlterHookArg(IndexRelationId, thisIndexOid, 0,
InvalidOid, is_internal);
InvalidOid, is_internal);
}
heap_freetuple(pg_index_tuple);
}
@ -10214,15 +10214,15 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode
/* Of the system columns, only oid is indexable. */
if (attno <= 0 && attno != ObjectIdAttributeNumber)
elog(ERROR, "internal column %u in unique index \"%s\"",
attno, RelationGetRelationName(indexRel));
attno, RelationGetRelationName(indexRel));
attr = rel->rd_att->attrs[attno - 1];
if (!attr->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("index \"%s\" cannot be used as replica identity because column \"%s\" is nullable",
RelationGetRelationName(indexRel),
NameStr(attr->attname))));
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("index \"%s\" cannot be used as replica identity because column \"%s\" is nullable",
RelationGetRelationName(indexRel),
NameStr(attr->attname))));
}
/* This index is suitable for use as a replica identity. Mark it. */


@ -561,4 +561,3 @@ makeFuncCall(List *name, List *args, int location)
n->over = NULL;
return n;
}


@ -116,7 +116,7 @@ static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
List *tidquals);
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
Index scanrelid, Node *funcexpr, bool ordinality,
List *funccolnames, List *funccoltypes, List *funccoltypmods,
List *funccolnames, List *funccoltypes, List *funccoltypmods,
List *funccolcollations);
static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
Index scanrelid, List *values_lists);


@ -3974,7 +3974,7 @@ DropFdwStmt: DROP FOREIGN DATA_P WRAPPER name opt_drop_behavior
$$ = (Node *) n;
}
| DROP FOREIGN DATA_P WRAPPER IF_P EXISTS name opt_drop_behavior
{
{
DropStmt *n = makeNode(DropStmt);
n->removeType = OBJECT_FDW;
n->objects = list_make1(list_make1(makeString($7)));
@ -4136,7 +4136,7 @@ DropForeignServerStmt: DROP SERVER name opt_drop_behavior
$$ = (Node *) n;
}
| DROP SERVER IF_P EXISTS name opt_drop_behavior
{
{
DropStmt *n = makeNode(DropStmt);
n->removeType = OBJECT_FOREIGN_SERVER;
n->objects = list_make1(list_make1(makeString($5)));
@ -4874,8 +4874,8 @@ AlterEnumStmt:
;
opt_if_not_exists: IF_P NOT EXISTS { $$ = true; }
| /* empty */ { $$ = false; }
;
| /* empty */ { $$ = false; }
;
/*****************************************************************************
@ -11168,44 +11168,44 @@ func_application: func_name '(' ')'
n->agg_star = TRUE;
$$ = (Node *)n;
}
;
;
/*
* func_expr and its cousin func_expr_windowless is split out from c_expr just
* so that we have classifications for "everything that is a function call or
* looks like one". This isn't very important, but it saves us having to document
* which variants are legal in the backwards-compatible functional-index syntax
* func_expr and its cousin func_expr_windowless is split out from c_expr just
* so that we have classifications for "everything that is a function call or
* looks like one". This isn't very important, but it saves us having to document
* which variants are legal in the backwards-compatible functional-index syntax
* for CREATE INDEX.
* (Note that many of the special SQL functions wouldn't actually make any
* sense as functional index entries, but we ignore that consideration here.)
*/
func_expr: func_application filter_clause over_clause
func_expr: func_application filter_clause over_clause
{
FuncCall *n = (FuncCall*)$1;
FuncCall *n = (FuncCall*)$1;
n->agg_filter = $2;
n->over = $3;
$$ = (Node*)n;
}
}
| func_expr_common_subexpr
{ $$ = $1; }
;
/*
/*
* As func_expr but does not accept WINDOW functions directly (they
* can still be contained in arguments for functions etc.)
* Use this when window expressions are not allowed, so to disambiguate
* Use this when window expressions are not allowed, so to disambiguate
* the grammar. (e.g. in CREATE INDEX)
*/
func_expr_windowless:
func_expr_windowless:
func_application { $$ = $1; }
| func_expr_common_subexpr { $$ = $1; }
| func_expr_common_subexpr { $$ = $1; }
;
/*
* Special expression
* Special expression
*/
func_expr_common_subexpr:
func_expr_common_subexpr:
COLLATION FOR '(' a_expr ')'
{
$$ = (Node *) makeFuncCall(SystemFuncName("pg_collation_for"),
@ -11386,8 +11386,8 @@ func_expr_common_subexpr:
* at the moment they result in the same thing.
*/
$$ = (Node *) makeFuncCall(SystemFuncName(((Value *)llast($5->names))->val.str),
list_make1($3),
@1);
list_make1($3),
@1);
}
| TRIM '(' BOTH trim_list ')'
{
@ -11596,9 +11596,9 @@ window_definition:
;
filter_clause:
FILTER '(' WHERE a_expr ')' { $$ = $4; }
| /*EMPTY*/ { $$ = NULL; }
;
FILTER '(' WHERE a_expr ')' { $$ = $4; }
| /*EMPTY*/ { $$ = NULL; }
;
over_clause: OVER window_specification
{ $$ = $2; }


@ -877,7 +877,7 @@ GetBackgroundWorkerPid(BackgroundWorkerHandle *handle, pid_t *pidp)
/*
* Wait for a background worker to start up.
*
*
* This is like GetBackgroundWorkerPid(), except that if the worker has not
* yet started, we wait for it to do so; thus, BGWH_NOT_YET_STARTED is never
* returned. However, if the postmaster has died, we give up and return
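
The comment in the hunk above documents the function that blocks until a dynamically registered background worker has started (or the postmaster dies). Below is a minimal, hedged sketch of the calling pattern it implies; the flag and start-time values are placeholders, and worker setup details (bgw_main, bgw_name, and so on) are omitted.

    BackgroundWorker worker;
    BackgroundWorkerHandle *handle;
    BgwHandleStatus status;
    pid_t       pid;

    memset(&worker, 0, sizeof(worker));
    worker.bgw_flags = BGWORKER_SHMEM_ACCESS;
    worker.bgw_start_time = BgWorkerStart_RecoveryFinished;
    worker.bgw_notify_pid = MyProcPid;  /* needed to be able to wait for startup */
    /* fill in bgw_main, bgw_name, etc. as appropriate */

    if (!RegisterDynamicBackgroundWorker(&worker, &handle))
        ereport(ERROR, (errmsg("could not register background worker")));

    status = WaitForBackgroundWorkerStartup(handle, &pid);
    if (status == BGWH_STARTED)
        elog(LOG, "background worker started, pid %d", (int) pid);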


@ -544,7 +544,7 @@ dsm_backend_startup(void)
if (!dsm_control_segment_sane(dsm_control, dsm_control_mapped_size))
{
dsm_impl_op(DSM_OP_DETACH, control_handle, 0,
&dsm_control_impl_private, &control_address,
&dsm_control_impl_private, &control_address,
&dsm_control_mapped_size, WARNING);
ereport(FATAL,
(errcode(ERRCODE_INTERNAL_ERROR),


@ -1786,7 +1786,7 @@ escape_json(StringInfo buf, const char *str)
*
* Returns the type of the outermost JSON value as TEXT. Possible types are
* "object", "array", "string", "number", "boolean", and "null".
*
*
* Performs a single call to json_lex() to get the first token of the supplied
* value. This initial token uniquely determines the value's type. As our
* input must already have been validated by json_in() or json_recv(), the
@ -1796,39 +1796,39 @@ escape_json(StringInfo buf, const char *str)
Datum
json_typeof(PG_FUNCTION_ARGS)
{
text *json = PG_GETARG_TEXT_P(0);
text *json = PG_GETARG_TEXT_P(0);
JsonLexContext *lex = makeJsonLexContext(json, false);
JsonTokenType tok;
char *type;
JsonLexContext *lex = makeJsonLexContext(json, false);
JsonTokenType tok;
char *type;
/* Lex exactly one token from the input and check its type. */
json_lex(lex);
tok = lex_peek(lex);
switch (tok)
{
case JSON_TOKEN_OBJECT_START:
type = "object";
break;
case JSON_TOKEN_ARRAY_START:
type = "array";
break;
case JSON_TOKEN_STRING:
type = "string";
break;
case JSON_TOKEN_NUMBER:
type = "number";
break;
case JSON_TOKEN_TRUE:
case JSON_TOKEN_FALSE:
type = "boolean";
break;
case JSON_TOKEN_NULL:
type = "null";
break;
default:
elog(ERROR, "unexpected json token: %d", tok);
}
/* Lex exactly one token from the input and check its type. */
json_lex(lex);
tok = lex_peek(lex);
switch (tok)
{
case JSON_TOKEN_OBJECT_START:
type = "object";
break;
case JSON_TOKEN_ARRAY_START:
type = "array";
break;
case JSON_TOKEN_STRING:
type = "string";
break;
case JSON_TOKEN_NUMBER:
type = "number";
break;
case JSON_TOKEN_TRUE:
case JSON_TOKEN_FALSE:
type = "boolean";
break;
case JSON_TOKEN_NULL:
type = "null";
break;
default:
elog(ERROR, "unexpected json token: %d", tok);
}
PG_RETURN_TEXT_P(cstring_to_text(type));
PG_RETURN_TEXT_P(cstring_to_text(type));
}
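
The reindented block above is the backing C implementation of the SQL-level json_typeof() function. If it were to be called from other backend C code, the usual fmgr route would look roughly like this hedged sketch (the helper name is hypothetical; the appropriate fmgr and json headers are assumed to be included):

    static const char *
    json_type_of_cstring(const char *json_literal)
    {
        Datum   arg = PointerGetDatum(cstring_to_text(json_literal));
        Datum   res = DirectFunctionCall1(json_typeof, arg);

        return text_to_cstring(DatumGetTextPP(res));
    }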


@ -2667,7 +2667,7 @@ RelationBuildLocalRelation(const char *relname,
/* system relations and non-table objects don't have one */
if (!IsSystemNamespace(relnamespace) &&
(relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW))
(relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW))
rel->rd_rel->relreplident = REPLICA_IDENTITY_DEFAULT;
else
rel->rd_rel->relreplident = REPLICA_IDENTITY_NOTHING;


@ -2142,7 +2142,6 @@ process_log_prefix_padding(const char *p, int *ppadding)
return NULL;
paddingsign = -1;
}
/* generate an int version of the numerical string */
while (*p >= '0' && *p <= '9')
@ -2169,7 +2168,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
/* has counter been reset in current process? */
static int log_my_pid = 0;
int padding;
const char *p;
const char *p;
/*
* This is one of the few places where we'd rather not inherit a static
@ -2284,7 +2283,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
if (padding != 0)
{
char strfbuf[128];
snprintf(strfbuf, sizeof(strfbuf) - 1, "%lx.%x",
snprintf(strfbuf, sizeof(strfbuf) - 1, "%lx.%x",
(long) (MyStartTime), MyProcPid);
appendStringInfo(buf, "%*s", padding, strfbuf);
}
@ -2352,13 +2351,13 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
case 'r':
if (MyProcPort && MyProcPort->remote_host)
{
if (padding != 0)
if (padding != 0)
{
if (MyProcPort->remote_port && MyProcPort->remote_port[0] != '\0')
{
/*
/*
* This option is slightly special as the port number
* may be appended onto the end. Here we need to build
* may be appended onto the end. Here we need to build
* 1 string which contains the remote_host and optionally
* the remote_port (if set) so we can properly align the
* string.
@ -2371,7 +2370,6 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
}
else
appendStringInfo(buf, "%*s", padding, MyProcPort->remote_host);
}
else
{
@ -2379,7 +2377,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
appendStringInfoString(buf, MyProcPort->remote_host);
if (MyProcPort->remote_port &&
MyProcPort->remote_port[0] != '\0')
appendStringInfo(buf, "(%s)",
appendStringInfo(buf, "(%s)",
MyProcPort->remote_port);
}
@ -2389,7 +2387,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
padding > 0 ? padding : -padding);
break;
case 'h':
if (MyProcPort && MyProcPort->remote_host)
if (MyProcPort && MyProcPort->remote_host)
{
if (padding != 0)
appendStringInfo(buf, "%*s", padding, MyProcPort->remote_host);


@ -4250,7 +4250,7 @@ SelectConfigFiles(const char *userDoption, const char *progname)
pg_timezone_abbrev_initialize();
set_default_effective_cache_size();
/*
* Figure out where pg_hba.conf is, and make sure the path is absolute.
*/


@ -370,12 +370,12 @@
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# info
# notice
# warning
# error


@ -13488,7 +13488,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
* dump properties we only have ALTER TABLE syntax for
*/
if ((tbinfo->relkind == RELKIND_RELATION || tbinfo->relkind == RELKIND_MATVIEW) &&
tbinfo->relreplident != REPLICA_IDENTITY_DEFAULT)
tbinfo->relreplident != REPLICA_IDENTITY_DEFAULT)
{
if (tbinfo->relreplident == REPLICA_IDENTITY_INDEX)
{
@ -13497,12 +13497,12 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
else if (tbinfo->relreplident == REPLICA_IDENTITY_NOTHING)
{
appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY NOTHING;\n",
fmtId(tbinfo->dobj.name));
fmtId(tbinfo->dobj.name));
}
else if (tbinfo->relreplident == REPLICA_IDENTITY_FULL)
{
appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY FULL;\n",
fmtId(tbinfo->dobj.name));
fmtId(tbinfo->dobj.name));
}
}
@ -13681,9 +13681,9 @@ dumpIndex(Archive *fout, IndxInfo *indxinfo)
if (indxinfo->indisreplident)
{
appendPQExpBuffer(q, "\nALTER TABLE ONLY %s REPLICA IDENTITY USING",
fmtId(tbinfo->dobj.name));
fmtId(tbinfo->dobj.name));
appendPQExpBuffer(q, " INDEX %s;\n",
fmtId(indxinfo->dobj.name));
fmtId(indxinfo->dobj.name));
}
/*


@ -1041,20 +1041,21 @@ exec_command(const char *cmd,
if (!opt0)
{
size_t i;
/* list all variables */
static const char *const my_list[] = {
/* list all variables */
int i;
static const char *const my_list[] = {
"border", "columns", "expanded", "fieldsep",
"footer", "format", "linestyle", "null",
"numericlocale", "pager", "recordsep",
"tableattr", "title", "tuples_only",
NULL };
for (i = 0; my_list[i] != NULL; i++) {
printPsetInfo(my_list[i], &pset.popt);
}
NULL
};
success = true;
for (i = 0; my_list[i] != NULL; i++)
printPsetInfo(my_list[i], &pset.popt);
success = true;
}
else
success = do_pset(opt0, opt1, &pset.popt, pset.quiet);
@ -2432,8 +2433,8 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
return false;
}
if (!quiet)
printPsetInfo(param, &pset.popt);
if (!quiet)
printPsetInfo(param, &pset.popt);
return true;
}


@ -2311,13 +2311,13 @@ describeOneTableDetails(const char *schemaname,
}
if ((tableinfo.relkind == 'r' || tableinfo.relkind == 'm') &&
tableinfo.relreplident != 'd' && tableinfo.relreplident != 'i')
tableinfo.relreplident != 'd' && tableinfo.relreplident != 'i')
{
const char *s = _("Replica Identity");
printfPQExpBuffer(&buf, "%s: %s",
s,
tableinfo.relreplident == 'n' ? "NOTHING" : "FULL");
s,
tableinfo.relreplident == 'n' ? "NOTHING" : "FULL");
printTableAddFooter(&cont, buf.data);
}


@ -1584,7 +1584,7 @@ psql_completion(char *text, int start, int end)
else if (pg_strcasecmp(prev4_wd, "REPLICA") == 0 &&
pg_strcasecmp(prev3_wd, "IDENTITY") == 0 &&
pg_strcasecmp(prev2_wd, "USING") == 0 &&
pg_strcasecmp(prev_wd, "INDEX") == 0)
pg_strcasecmp(prev_wd, "INDEX") == 0)
{
completion_info_charp = prev5_wd;
COMPLETE_WITH_QUERY(Query_for_index_of_table);


@ -1396,8 +1396,8 @@ typedef struct SubqueryScanState
*
* eflags node's capability flags
* ordinal column value for WITH ORDINALITY
* scan_tupdesc scan tuple descriptor
* func_tupdesc function tuple descriptor
* scan_tupdesc scan tuple descriptor
* func_tupdesc function tuple descriptor
* func_slot function result slot, or null
* tuplestorestate private state of tuplestore.c
* funcexpr state for function expression being evaluated


@ -763,7 +763,7 @@ typedef struct RangeTblEntry
* collations. Note that in this case, ORDINALITY is not permitted, so
* there is no extra ordinal column to be allowed for.
*
* Otherwise, those fields are NIL, and the result column types must be
* Otherwise, those fields are NIL, and the result column types must be
* derived from the funcexpr while treating the ordinal column, if
* present, as a special case. (see get_rte_attribute_*)
*/


@ -5,7 +5,7 @@ descriptor statements have the following shortcomings
- input descriptors (USING DESCRIPTOR <name>) are not supported
Reason: to fully support dynamic SQL the frontend/backend communication
should change to recognize input parameters.
Since this is not likely to happen in the near future and you
can cover the same functionality with the existing infrastructure
(using s[n]printf), I'll leave the work to someone else.
should change to recognize input parameters.
Since this is not likely to happen in the near future and you
can cover the same functionality with the existing infrastructure
(using s[n]printf), I'll leave the work to someone else.


@ -178,4 +178,3 @@ if ($verbose)
}
exit $ret;


@ -6,7 +6,7 @@
# Copyright (c) 2007-2013, PostgreSQL Global Development Group
#
# Written by Mike Aubury <mike.aubury@aubit.com>
# Michael Meskes <meskes@postgresql.org>
# Michael Meskes <meskes@postgresql.org>
# Andy Colson <andy@squeakycode.net>
#
# Placed under the same license as PostgreSQL.
@ -617,7 +617,7 @@ sub dump_line
=top
load addons into cache
%addons = {
%addons = {
stmtClosePortalStmt => { 'type' => 'block', 'lines' => [ "{", "if (INFORMIX_MODE)" ..., "}" ] },
stmtViewStmt => { 'type' => 'rule', 'lines' => [ "| ECPGAllocateDescr", ... ] }
}
@ -671,5 +671,3 @@ sub preload_addons
}
}
}


@ -542,10 +542,10 @@ cppline {space}*#([^i][A-Za-z]*|{if}|{ifdef}|{ifndef}|{import})(.*\\{space})*.
else
{
/*
* When we fail to match $...$ to dolqstart, transfer
* the $... part to the output, but put back the final
* $ for rescanning. Consider $delim$...$junk$delim$
*/
* When we fail to match $...$ to dolqstart, transfer
* the $... part to the output, but put back the final
* $ for rescanning. Consider $delim$...$junk$delim$
*/
addlit(yytext, yyleng-1);
yyless(yyleng-1);
}
@ -1262,11 +1262,11 @@ static void
addlitchar(unsigned char ychar)
{
/* enlarge buffer if needed */
if ((literallen+1) >= literalalloc)
{
literalalloc *= 2;
literalbuf = (char *) realloc(literalbuf, literalalloc);
}
if ((literallen+1) >= literalalloc)
{
literalalloc *= 2;
literalbuf = (char *) realloc(literalbuf, literalalloc);
}
/* append new data, add trailing null */
literalbuf[literallen] = ychar;
literallen += 1;


@ -79,7 +79,7 @@ endif
REGRESS_OPTS = --dbname=regress1,connectdb --create-role=connectuser,connectdb $(EXTRA_REGRESS_OPTS)
check: all
./pg_regress $(REGRESS_OPTS) --top-builddir=$(top_builddir) --temp-install=./tmp_check $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule
./pg_regress $(REGRESS_OPTS) --top-builddir=$(top_builddir) --temp-install=./tmp_check $(pg_regress_locale_flags) $(THREAD) --schedule=$(srcdir)/ecpg_schedule
# the same options, but with --listen-on-tcp
checktcp: all


@ -25,7 +25,7 @@
static void test(void) {
/* exec sql begin declare section */
@ -127,7 +127,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
for (i=0; i<4; i++)
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
/* declare C cursor for select Item1 from T */
#line 35 "autoprep.pgc"


@ -79,7 +79,7 @@ main (void)


@ -113,7 +113,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
#line 30 "fetch.pgc"
while (1) {
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch 1 in C", ECPGt_EOIT,
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch 1 in C", ECPGt_EOIT,
ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
ECPGt_char,(str),(long)25,(long)1,(25)*sizeof(char),
@ -182,7 +182,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
#line 46 "fetch.pgc"
while (1) {
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch 1 in D", ECPGt_EOIT,
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch 1 in D", ECPGt_EOIT,
ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
ECPGt_char,(str),(long)25,(long)1,(25)*sizeof(char),


@ -24,7 +24,7 @@
int main() {
/* exec sql begin declare section */
#line 9 "insupd.pgc"
int i1 [ 3 ] , i2 [ 3 ] , i3 [ 3 ] , i4 ;


@ -25,7 +25,7 @@
int main() {
/* exec sql begin declare section */
#line 10 "parser.pgc"
int item [ 3 ] , ind [ 3 ] , i ;
@ -81,7 +81,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
for (i=0; i<3; i++)
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "alter table T alter Item1 type bigint", ECPGt_EOIT, ECPGt_EORT);
#line 31 "parser.pgc"


@ -178,7 +178,7 @@ if (sqlca.sqlcode < 0) sqlprint();}
while (true)
{
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch C", ECPGt_EOIT,
{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_normal, "fetch C", ECPGt_EOIT,
ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
ECPGt_char,(var),(long)25,(long)1,(25)*sizeof(char),


@ -7,7 +7,7 @@ EXEC SQL INCLUDE ../regression;
static void test(void) {
EXEC SQL BEGIN DECLARE SECTION;
int item[4], ind[4], i = 1;
int item[4], ind[4], i = 1;
int item1, ind1;
char sqlstr[64] = "SELECT item2 FROM T ORDER BY item2 NULLS LAST";
EXEC SQL END DECLARE SECTION;
@ -30,7 +30,7 @@ static void test(void) {
EXEC SQL SELECT Item2 INTO :item:ind FROM T ORDER BY Item2 NULLS LAST;
for (i=0; i<4; i++)
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
EXEC SQL DECLARE C CURSOR FOR SELECT Item1 FROM T;


@ -31,7 +31,7 @@ main (void)
c ptr = NULL;
struct varchar
{
int len;
int len;
char text[10];
} vc;
EXEC SQL END DECLARE SECTION;


@ -113,7 +113,7 @@ main ()
break;
case SQL3_DATE_TIME_TIMESTAMP:
exec sql get descriptor MYDESC value :INDEX
:DATETIME_INTERVAL_CODE = datetime_interval_code;
:DATETIME_INTERVAL_CODE = datetime_interval_code;
switch (DATETIME_INTERVAL_CODE)
{
case SQL3_DDT_DATE:
@ -174,7 +174,7 @@ main ()
break;
case SQL3_DATE_TIME_TIMESTAMP:
exec sql get descriptor MYDESC value :INDEX
:DATETIME_INTERVAL_CODE = datetime_interval_code,
:DATETIME_INTERVAL_CODE = datetime_interval_code,
:STRINGVAR = data;
printf ("%d \"%s\"\n", DATETIME_INTERVAL_CODE, STRINGVAR);
break;


@ -29,7 +29,7 @@ int main() {
EXEC SQL WHENEVER NOT FOUND DO BREAK;
while (1) {
EXEC SQL FETCH 1 IN C INTO :i, :str;
EXEC SQL FETCH 1 IN C INTO :i, :str;
printf("%d: %s\n", i, str);
}
@ -45,7 +45,7 @@ int main() {
EXEC SQL WHENEVER NOT FOUND DO BREAK;
while (1) {
EXEC SQL FETCH 1 IN D INTO :i, :str;
EXEC SQL FETCH 1 IN D INTO :i, :str;
printf("%d: %s\n", i, str);
}
EXEC SQL CLOSE D;


@ -6,7 +6,7 @@ EXEC SQL INCLUDE ../regression;
int main() {
EXEC SQL BEGIN DECLARE SECTION;
int i1[3], i2[3], i3[3], i4;
int i1[3], i2[3], i3[3], i4;
EXEC SQL END DECLARE SECTION;
ECPGdebug(1, stderr);


@ -7,7 +7,7 @@ EXEC SQL INCLUDE ../regression;
int main() {
EXEC SQL BEGIN DECLARE SECTION;
int item[3], ind[3], i;
int item[3], ind[3], i;
EXEC SQL END DECLARE SECTION;
ECPGdebug(1, stderr);
@ -19,14 +19,14 @@ int main() {
EXEC SQL CREATE TABLE T ( Item1 int, Item2 int );
EXEC SQL INSERT INTO t
SELECT 1,nullif(y-1,0)
EXEC SQL INSERT INTO t
SELECT 1,nullif(y-1,0)
FROM generate_series(1,3) WITH ORDINALITY AS series(x,y);
EXEC SQL SELECT Item2 INTO :item:ind FROM T ORDER BY Item2 NULLS LAST;
for (i=0; i<3; i++)
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
printf("item[%d] = %d\n", i, ind[i] ? -1 : item[i]);
EXEC SQL ALTER TABLE T ALTER Item1 TYPE bigint;
EXEC SQL ALTER TABLE T ALTER COLUMN Item2 SET DATA TYPE smallint;


@ -48,7 +48,7 @@ int main() {
while (true)
{
EXEC SQL FETCH C INTO :i, :var;
EXEC SQL FETCH C INTO :i, :var;
printf("value: %d %s\n", i, var);
}


@ -72,7 +72,7 @@ CREATE OR REPLACE FUNCTION plperl_concat(TEXT[]) RETURNS TEXT AS $$
my $array_arg = shift;
my $result = "";
my @arrays;
push @arrays, @$array_arg;
while (@arrays > 0) {
my $el = shift @arrays;
@ -107,7 +107,7 @@ CREATE TYPE foo AS (bar INTEGER, baz TEXT);
CREATE OR REPLACE FUNCTION plperl_array_of_rows(foo[]) RETURNS TEXT AS $$
my $array_arg = shift;
my $result = "";
for my $row_ref (@$array_arg) {
die "not a hash reference" unless (ref $row_ref eq "HASH");
$result .= $row_ref->{bar}." items of ".$row_ref->{baz}.";";
@ -125,7 +125,7 @@ CREATE TYPE rowfoo AS (bar INTEGER, baz INTEGER[]);
CREATE OR REPLACE FUNCTION plperl_sum_row_elements(rowfoo) RETURNS TEXT AS $$
my $row_ref = shift;
my $result;
if (ref $row_ref ne 'HASH') {
$result = 0;
}
@ -152,7 +152,7 @@ CREATE TYPE rowbar AS (foo rowfoo[]);
CREATE OR REPLACE FUNCTION plperl_sum_array_of_rows(rowbar) RETURNS TEXT AS $$
my $rowfoo_ref = shift;
my $result = 0;
if (ref $rowfoo_ref eq 'HASH') {
my $row_array_ref = $rowfoo_ref->{foo};
if (is_array_ref($row_array_ref)) {


@ -46,7 +46,7 @@ CREATE OR REPLACE FUNCTION plperl_concat(TEXT[]) RETURNS TEXT AS $$
my $array_arg = shift;
my $result = "";
my @arrays;
push @arrays, @$array_arg;
while (@arrays > 0) {
my $el = shift @arrays;
@ -68,7 +68,7 @@ CREATE TYPE foo AS (bar INTEGER, baz TEXT);
CREATE OR REPLACE FUNCTION plperl_array_of_rows(foo[]) RETURNS TEXT AS $$
my $array_arg = shift;
my $result = "";
for my $row_ref (@$array_arg) {
die "not a hash reference" unless (ref $row_ref eq "HASH");
$result .= $row_ref->{bar}." items of ".$row_ref->{baz}.";";
@ -84,7 +84,7 @@ CREATE TYPE rowfoo AS (bar INTEGER, baz INTEGER[]);
CREATE OR REPLACE FUNCTION plperl_sum_row_elements(rowfoo) RETURNS TEXT AS $$
my $row_ref = shift;
my $result;
if (ref $row_ref ne 'HASH') {
$result = 0;
}
@ -109,7 +109,7 @@ CREATE TYPE rowbar AS (foo rowfoo[]);
CREATE OR REPLACE FUNCTION plperl_sum_array_of_rows(rowbar) RETURNS TEXT AS $$
my $rowfoo_ref = shift;
my $result = 0;
if (ref $rowfoo_ref eq 'HASH') {
my $row_array_ref = $rowfoo_ref->{foo};
if (is_array_ref($row_array_ref)) {


@ -222,7 +222,7 @@ static Portal exec_dynquery_with_params(PLpgSQL_execstate *estate,
const char *portalname, int cursorOptions);
static char *format_expr_params(PLpgSQL_execstate *estate,
const PLpgSQL_expr *expr);
const PLpgSQL_expr *expr);
static char *format_preparedparamsdata(PLpgSQL_execstate *estate,
const PreparedParamsData *ppd);
@ -3407,8 +3407,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
ereport(ERROR,
(errcode(ERRCODE_NO_DATA_FOUND),
errmsg("query returned no rows"),
errdetail ?
errdetail_internal("parameters: %s", errdetail) : 0));
errdetail ? errdetail_internal("parameters: %s", errdetail) : 0));
}
/* set the target to NULL(s) */
exec_move_row(estate, rec, row, NULL, tuptab->tupdesc);
@ -3427,8 +3426,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ROWS),
errmsg("query returned more than one row"),
errdetail ?
errdetail_internal("parameters: %s", errdetail) : 0));
errdetail ? errdetail_internal("parameters: %s", errdetail) : 0));
}
/* Put the first result row into the target */
exec_move_row(estate, rec, row, tuptab->vals[0], tuptab->tupdesc);
@ -3601,8 +3599,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate,
ereport(ERROR,
(errcode(ERRCODE_NO_DATA_FOUND),
errmsg("query returned no rows"),
errdetail ?
errdetail_internal("parameters: %s", errdetail) : 0));
errdetail ? errdetail_internal("parameters: %s", errdetail) : 0));
}
/* set the target to NULL(s) */
exec_move_row(estate, rec, row, NULL, tuptab->tupdesc);
@ -3621,8 +3618,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate,
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ROWS),
errmsg("query returned more than one row"),
errdetail ?
errdetail_internal("parameters: %s", errdetail) : 0));
errdetail ? errdetail_internal("parameters: %s", errdetail) : 0));
}
/* Put the first result row into the target */


@ -185,7 +185,7 @@ static List *read_raise_options(void);
%type <forvariable> for_variable
%type <stmt> for_control
%type <str> any_identifier opt_block_label opt_label option_value
%type <str> any_identifier opt_block_label opt_label option_value
%type <list> proc_sect proc_stmts stmt_elsifs stmt_else
%type <loop_body> loop_body


@ -38,7 +38,7 @@ create function check_pkey1_exists(int4, bpchar) returns bool as E'
set GD(plan) [spi_prepare \\
"select 1 from T_pkey1 \\
where key1 = \\$1 and key2 = \\$2" \\
{int4 bpchar}]
{int4 bpchar}]
}
set n [spi_execp -count 1 $GD(plan) [list $1 $2]]
@ -55,8 +55,8 @@ CREATE VIEW trigger_test_view AS SELECT * FROM trigger_test;
CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$
if { [info exists TG_relid] } {
set TG_relid "bogus:12345"
}
set TG_relid "bogus:12345"
}
set dnames [info locals {[a-zA-Z]*} ]
@ -72,10 +72,10 @@ CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$
set str "$str$akey: $val"
}
set str "$str}"
elog NOTICE "$key: $str"
elog NOTICE "$key: $str"
} else {
set val [eval list "\$$key" ]
elog NOTICE "$key: $val"
elog NOTICE "$key: $val"
}
}


@ -109,8 +109,8 @@ set conn [eval pg_connect $dbname $options]
if {$i == $argc} {
pg_select $conn "select distinct modname from pltcl_modules \
order by modname" \
MOD {
order by modname" \
MOD {
listmodule $conn $MOD(modname)
}
} else {


@ -84,7 +84,7 @@ proc __PLTcl_loadmod_check_table {conn tabname expnames exptypes} {
set found 0
pg_select $conn "select C.relname, A.attname, A.attnum, T.typname \
from pg_catalog.pg_class C, pg_catalog.pg_attribute A, pg_catalog.pg_type T \
from pg_catalog.pg_class C, pg_catalog.pg_attribute A, pg_catalog.pg_type T \
where C.relname = '$tabname' \
and A.attrelid = C.oid \
and A.attnum > 0 \
@ -449,7 +449,7 @@ proc __PLTcl_loadmod_load_modules {conn} {
set srctext [string range $srctext 4000 end]
pg_result [ \
pg_exec $conn "insert into pltcl_modules values ( \
pg_exec $conn "insert into pltcl_modules values ( \
'$xname', $i, '$xpart')" \
] -clear
incr i


@ -43,7 +43,7 @@ create function check_pkey1_exists(int4, bpchar) returns bool as E'
set GD(plan) [spi_prepare \\
"select 1 from T_pkey1 \\
where key1 = \\$1 and key2 = \\$2" \\
{int4 bpchar}]
{int4 bpchar}]
}
set n [spi_execp -count 1 $GD(plan) [list $1 $2]]
@ -65,8 +65,8 @@ CREATE VIEW trigger_test_view AS SELECT * FROM trigger_test;
CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$
if { [info exists TG_relid] } {
set TG_relid "bogus:12345"
}
set TG_relid "bogus:12345"
}
set dnames [info locals {[a-zA-Z]*} ]
@ -82,10 +82,10 @@ CREATE FUNCTION trigger_data() returns trigger language pltcl as $_$
set str "$str$akey: $val"
}
set str "$str}"
elog NOTICE "$key: $str"
elog NOTICE "$key: $str"
} else {
set val [eval list "\$$key" ]
elog NOTICE "$key: $val"
elog NOTICE "$key: $val"
}
}


@ -442,7 +442,7 @@ ERROR: hash procedures must have one argument
DROP OPERATOR FAMILY alt_opf15 USING hash;
ERROR: current transaction is aborted, commands ignored until end of transaction block
ROLLBACK;
-- Should fail. In gist throw an error when giving different data types for function argument
-- Should fail. In gist throw an error when giving different data types for function argument
-- without defining left / right type in ALTER OPERATOR FAMILY ... ADD FUNCTION
CREATE OPERATOR FAMILY alt_opf16 USING gist;
ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2);


@ -964,16 +964,16 @@ select json '{ "a": "null \u0000 escape" }' ->> 'a' as not_unescaped;
--json_typeof() function
select value, json_typeof(value)
from (values (json '123.4'),
(json '-1'),
(json '"foo"'),
(json 'true'),
(json 'false'),
from (values (json '123.4'),
(json '-1'),
(json '"foo"'),
(json 'true'),
(json 'false'),
(json 'null'),
(json '[1, 2, 3]'),
(json '[]'),
(json '{"x":"foo", "y":123}'),
(json '{}'),
(json '[1, 2, 3]'),
(json '[]'),
(json '{"x":"foo", "y":123}'),
(json '{}'),
(NULL::json))
as data(value);
value | json_typeof


@ -960,16 +960,16 @@ select json '{ "a": "null \u0000 escape" }' ->> 'a' as not_unescaped;
--json_typeof() function
select value, json_typeof(value)
from (values (json '123.4'),
(json '-1'),
(json '"foo"'),
(json 'true'),
(json 'false'),
from (values (json '123.4'),
(json '-1'),
(json '"foo"'),
(json 'true'),
(json 'false'),
(json 'null'),
(json '[1, 2, 3]'),
(json '[]'),
(json '{"x":"foo", "y":123}'),
(json '{}'),
(json '[1, 2, 3]'),
(json '[]'),
(json '{"x":"foo", "y":123}'),
(json '{}'),
(NULL::json))
as data(value);
value | json_typeof


@ -2604,7 +2604,7 @@ create function excpt_test2() returns void as $$
begin
begin
begin
raise notice '% %', sqlstate, sqlerrm;
raise notice '% %', sqlstate, sqlerrm;
end;
end;
end; $$ language plpgsql;
@ -2618,7 +2618,7 @@ CONTEXT: PL/pgSQL function excpt_test2() line 5 at RAISE
create function excpt_test3() returns void as $$
begin
begin
raise exception 'user exception';
raise exception 'user exception';
exception when others then
raise notice 'caught exception % %', sqlstate, sqlerrm;
begin


@ -1280,7 +1280,7 @@ INSERT INTO credit_card
(103, '9801-2345-6789-0123', 2000);
INSERT INTO credit_usage
VALUES (101, '2011-09-15', 120),
(101, '2011-10-05', 90),
(101, '2011-10-05', 90),
(101, '2011-10-18', 110),
(101, '2011-10-21', 200),
(101, '2011-11-10', 80),


@ -1280,7 +1280,7 @@ INSERT INTO credit_card
(103, '9801-2345-6789-0123', 2000);
INSERT INTO credit_usage
VALUES (101, '2011-09-15', 120),
(101, '2011-10-05', 90),
(101, '2011-10-05', 90),
(101, '2011-10-18', 110),
(101, '2011-10-21', 200),
(101, '2011-11-10', 80),


@ -2,12 +2,12 @@
-- creating test tables
CREATE TABLE guid1
(
guid_field UUID,
guid_field UUID,
text_field TEXT DEFAULT(now())
);
CREATE TABLE guid2
(
guid_field UUID,
guid_field UUID,
text_field TEXT DEFAULT(now())
);
-- inserting invalid data tests


@ -382,7 +382,7 @@ ALTER OPERATOR FAMILY alt_opf15 USING hash ADD FUNCTION 1 fn_opf15(int4, int2);
DROP OPERATOR FAMILY alt_opf15 USING hash;
ROLLBACK;
-- Should fail. In gist throw an error when giving different data types for function argument
-- Should fail. In gist throw an error when giving different data types for function argument
-- without defining left / right type in ALTER OPERATOR FAMILY ... ADD FUNCTION
CREATE OPERATOR FAMILY alt_opf16 USING gist;
ALTER OPERATOR FAMILY alt_opf16 USING gist ADD FUNCTION 1 btint42cmp(int4, int2);


@ -313,15 +313,15 @@ select json '{ "a": "null \u0000 escape" }' ->> 'a' as not_unescaped;
--json_typeof() function
select value, json_typeof(value)
from (values (json '123.4'),
(json '-1'),
(json '"foo"'),
(json 'true'),
(json 'false'),
from (values (json '123.4'),
(json '-1'),
(json '"foo"'),
(json 'true'),
(json 'false'),
(json 'null'),
(json '[1, 2, 3]'),
(json '[]'),
(json '{"x":"foo", "y":123}'),
(json '{}'),
(json '[1, 2, 3]'),
(json '[]'),
(json '{"x":"foo", "y":123}'),
(json '{}'),
(NULL::json))
as data(value);


@ -2215,7 +2215,7 @@ create function excpt_test2() returns void as $$
begin
begin
begin
raise notice '% %', sqlstate, sqlerrm;
raise notice '% %', sqlstate, sqlerrm;
end;
end;
end; $$ language plpgsql;
@ -2225,7 +2225,7 @@ select excpt_test2();
create function excpt_test3() returns void as $$
begin
begin
raise exception 'user exception';
raise exception 'user exception';
exception when others then
raise notice 'caught exception % %', sqlstate, sqlerrm;
begin
@ -4084,4 +4084,3 @@ select outer_outer_func(20);
drop function outer_outer_func(int);
drop function outer_func(int);
drop function inner_func(int);


@ -47,7 +47,7 @@ INSERT INTO credit_card
(103, '9801-2345-6789-0123', 2000);
INSERT INTO credit_usage
VALUES (101, '2011-09-15', 120),
(101, '2011-10-05', 90),
(101, '2011-10-05', 90),
(101, '2011-10-18', 110),
(101, '2011-10-21', 200),
(101, '2011-11-10', 80),


@ -2,12 +2,12 @@
-- creating test tables
CREATE TABLE guid1
(
guid_field UUID,
guid_field UUID,
text_field TEXT DEFAULT(now())
);
CREATE TABLE guid2
(
guid_field UUID,
guid_field UUID,
text_field TEXT DEFAULT(now())
);


@ -60,4 +60,3 @@ WDT 32400 D # West Australian Daylight-Saving Time (not in zic)
WST 28800 # Western Standard Time (Australia)
# (Antarctica/Casey)
# (Australia/Perth)


@ -729,4 +729,3 @@ WAKT 43200 # Wake Time
WFT 43200 # Wallis and Futuna Time
# (Pacific/Wallis)
YAPT 36000 # Yap Time (Micronesia) (not in zic)


@ -32,4 +32,3 @@ UTC 0 # Coordinated Universal Time
# (Etc/UTC)
Z 0 # Zulu
ZULU 0 # Zulu


@ -89,7 +89,7 @@ Starting a New Development Cycle
Creating Back-Branch Release Notes
==================================
* Run src/tools/git_changelog to generate a list of relevant commits.
* Run src/tools/git_changelog to generate a list of relevant commits.
You can also run 'git log' in each branch. Be sure to use the --since
branch tag and not the release date, as commits could have been done
between branch stamping and the release date.
@ -99,9 +99,9 @@ Creating Back-Branch Release Notes
should include more small change details because testing is limited.
* Copy this into older branches' release-N.N.sgml files, then remove
items that do not apply based on commit logs for that branch.
items that do not apply based on commit logs for that branch.
* Add any older branch commits not in the newest branch. This can be
* Add any older branch commits not in the newest branch. This can be
accomplished by diff'ing the newest and older branch commit logs and
looking for lines that only appear in the older branch, e.g.:


@ -65,4 +65,3 @@ print
"Manually update doc/src/sgml/legal.sgml and src/interfaces/libpq/libpq.rc.in too.\n";
print
"Also update ./COPYRIGHT and doc/src/sgml/legal.sgml in all back branches.\n";


@ -92,4 +92,3 @@ else
{
exit $? >> 8;
}


@ -42,4 +42,3 @@ case only these files will be changed, and nothing else will be touched. If the
first non-option argument is not a .c or .h file, it is treated as the name
of a typedefs file for legacy reasons, but this use is deprecated - use the
--typedefs option instead.