Convert documentation to DocBook XML

Since some preparation work had already been done, the only source
changes left were changing empty-element tags like <xref linkend="foo">
to <xref linkend="foo"/>, and changing the DOCTYPE.
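
For example (taken from the hunks below), a cross-reference that was
previously written with an SGML empty-element tag,

    <xref linkend="functions-adminpack-table">

is now written with XML empty-element syntax,

    <xref linkend="functions-adminpack-table"/>

and the DOCTYPE now names the DocBook XML DTD, as in the configure test
document below:

    <!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">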

The source files are still named *.sgml, but they are actually XML files
now.  Renaming could be considered later.

In the build system, the intermediate step to convert from SGML to XML
is removed.  Everything is built straight from the source files again.
The OpenSP (or the old SP) package is no longer needed.
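
Concretely, in the documentation Makefile (see the hunks below) the quick
syntax check used to parse the sources with OpenSP, and a separate osx step
converted them to XML; now xmllint validates the XML sources in place.  A
rough sketch of the before and after recipes, using the variable names from
the diff:

    # before: SGML parse plus a separate SGML->XML conversion step
    $(NSGMLS) $(SPFLAGS) $(SGMLINCLUDE) -s postgres.sgml
    $(OSX) $(SPFLAGS) $(SGMLINCLUDE) -x lower postgres.sgml > postgres.xml.tmp

    # after: validate and build straight from the *.sgml (really XML) sources
    $(XMLLINT) $(XMLINCLUDE) --noout --valid postgres.sgml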

The documentation toolchain instructions are updated and are much
simpler now.

Peter Eisentraut, Alexander Lakhin, Jürgen Purtz
Peter Eisentraut 2017-11-23 09:39:47 -05:00
parent 2f8d6369e6
commit 3c49c6facb
346 changed files with 4260 additions and 4588 deletions


@ -1,18 +1,18 @@
# config/docbook.m4
# PGAC_PROG_NSGMLS
# ----------------
AC_DEFUN([PGAC_PROG_NSGMLS],
[PGAC_PATH_PROGS(NSGMLS, [onsgmls nsgmls])])
# PGAC_PATH_XMLLINT
# -----------------
AC_DEFUN([PGAC_PATH_XMLLINT],
[PGAC_PATH_PROGS(XMLLINT, xmllint)])
# PGAC_CHECK_DOCBOOK(VERSION)
# ---------------------------
AC_DEFUN([PGAC_CHECK_DOCBOOK],
[AC_REQUIRE([PGAC_PROG_NSGMLS])
AC_CACHE_CHECK([for DocBook V$1], [pgac_cv_check_docbook],
[cat >conftest.sgml <<EOF
<!doctype book PUBLIC "-//OASIS//DTD DocBook V$1//EN">
[AC_REQUIRE([PGAC_PATH_XMLLINT])
AC_CACHE_CHECK([for DocBook XML V$1], [pgac_cv_check_docbook],
[cat >conftest.xml <<EOF
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V$1//EN" "http://www.oasis-open.org/docbook/xml/$1/docbookx.dtd">
<book>
<title>test</title>
<chapter>
@ -27,13 +27,13 @@ EOF
pgac_cv_check_docbook=no
if test -n "$NSGMLS"; then
$NSGMLS -s conftest.sgml 1>&AS_MESSAGE_LOG_FD 2>&1
if test -n "$XMLLINT"; then
$XMLLINT --noout --valid conftest.xml 1>&AS_MESSAGE_LOG_FD 2>&1
if test $? -eq 0; then
pgac_cv_check_docbook=yes
fi
fi
rm -f conftest.sgml])
rm -f conftest.xml])
have_docbook=$pgac_cv_check_docbook
AC_SUBST([have_docbook])

configure

@ -630,12 +630,10 @@ vpath_build
PG_VERSION_NUM
PROVE
FOP
OSX
XSLTPROC
XMLLINT
DBTOEPUB
have_docbook
NSGMLS
XMLLINT
TCL_SHLIB_LD_LIBS
TCL_SHARED_BUILD
TCL_LIB_SPEC
@ -16132,19 +16130,19 @@ fi
#
# Check for DocBook and tools
#
if test -z "$NSGMLS"; then
for ac_prog in onsgmls nsgmls
if test -z "$XMLLINT"; then
for ac_prog in xmllint
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_NSGMLS+:} false; then :
if ${ac_cv_path_XMLLINT+:} false; then :
$as_echo_n "(cached) " >&6
else
case $NSGMLS in
case $XMLLINT in
[\\/]* | ?:[\\/]*)
ac_cv_path_NSGMLS="$NSGMLS" # Let the user override the test with a path.
ac_cv_path_XMLLINT="$XMLLINT" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
@ -16154,7 +16152,7 @@ do
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_NSGMLS="$as_dir/$ac_word$ac_exec_ext"
ac_cv_path_XMLLINT="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
@ -16165,35 +16163,35 @@ IFS=$as_save_IFS
;;
esac
fi
NSGMLS=$ac_cv_path_NSGMLS
if test -n "$NSGMLS"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $NSGMLS" >&5
$as_echo "$NSGMLS" >&6; }
XMLLINT=$ac_cv_path_XMLLINT
if test -n "$XMLLINT"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5
$as_echo "$XMLLINT" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
test -n "$NSGMLS" && break
test -n "$XMLLINT" && break
done
else
# Report the value of NSGMLS in configure's output in all cases.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for NSGMLS" >&5
$as_echo_n "checking for NSGMLS... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $NSGMLS" >&5
$as_echo "$NSGMLS" >&6; }
# Report the value of XMLLINT in configure's output in all cases.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLLINT" >&5
$as_echo_n "checking for XMLLINT... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5
$as_echo "$XMLLINT" >&6; }
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for DocBook V4.2" >&5
$as_echo_n "checking for DocBook V4.2... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for DocBook XML V4.2" >&5
$as_echo_n "checking for DocBook XML V4.2... " >&6; }
if ${pgac_cv_check_docbook+:} false; then :
$as_echo_n "(cached) " >&6
else
cat >conftest.sgml <<EOF
<!doctype book PUBLIC "-//OASIS//DTD DocBook V4.2//EN">
cat >conftest.xml <<EOF
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<book>
<title>test</title>
<chapter>
@ -16208,13 +16206,13 @@ EOF
pgac_cv_check_docbook=no
if test -n "$NSGMLS"; then
$NSGMLS -s conftest.sgml 1>&5 2>&1
if test -n "$XMLLINT"; then
$XMLLINT --noout --valid conftest.xml 1>&5 2>&1
if test $? -eq 0; then
pgac_cv_check_docbook=yes
fi
fi
rm -f conftest.sgml
rm -f conftest.xml
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_check_docbook" >&5
$as_echo "$pgac_cv_check_docbook" >&6; }
@ -16276,60 +16274,6 @@ $as_echo_n "checking for DBTOEPUB... " >&6; }
$as_echo "$DBTOEPUB" >&6; }
fi
if test -z "$XMLLINT"; then
for ac_prog in xmllint
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_XMLLINT+:} false; then :
$as_echo_n "(cached) " >&6
else
case $XMLLINT in
[\\/]* | ?:[\\/]*)
ac_cv_path_XMLLINT="$XMLLINT" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_XMLLINT="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
;;
esac
fi
XMLLINT=$ac_cv_path_XMLLINT
if test -n "$XMLLINT"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5
$as_echo "$XMLLINT" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
test -n "$XMLLINT" && break
done
else
# Report the value of XMLLINT in configure's output in all cases.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for XMLLINT" >&5
$as_echo_n "checking for XMLLINT... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $XMLLINT" >&5
$as_echo "$XMLLINT" >&6; }
fi
if test -z "$XSLTPROC"; then
for ac_prog in xsltproc
do
@ -16384,60 +16328,6 @@ $as_echo_n "checking for XSLTPROC... " >&6; }
$as_echo "$XSLTPROC" >&6; }
fi
if test -z "$OSX"; then
for ac_prog in osx sgml2xml sx
do
# Extract the first word of "$ac_prog", so it can be a program name with args.
set dummy $ac_prog; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
if ${ac_cv_path_OSX+:} false; then :
$as_echo_n "(cached) " >&6
else
case $OSX in
[\\/]* | ?:[\\/]*)
ac_cv_path_OSX="$OSX" # Let the user override the test with a path.
;;
*)
as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
IFS=$as_save_IFS
test -z "$as_dir" && as_dir=.
for ac_exec_ext in '' $ac_executable_extensions; do
if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
ac_cv_path_OSX="$as_dir/$ac_word$ac_exec_ext"
$as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
break 2
fi
done
done
IFS=$as_save_IFS
;;
esac
fi
OSX=$ac_cv_path_OSX
if test -n "$OSX"; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $OSX" >&5
$as_echo "$OSX" >&6; }
else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }
fi
test -n "$OSX" && break
done
else
# Report the value of OSX in configure's output in all cases.
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for OSX" >&5
$as_echo_n "checking for OSX... " >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $OSX" >&5
$as_echo "$OSX" >&6; }
fi
if test -z "$FOP"; then
for ac_prog in fop
do


@ -2091,12 +2091,10 @@ fi
#
# Check for DocBook and tools
#
PGAC_PROG_NSGMLS
PGAC_PATH_XMLLINT
PGAC_CHECK_DOCBOOK(4.2)
PGAC_PATH_PROGS(DBTOEPUB, dbtoepub)
PGAC_PATH_PROGS(XMLLINT, xmllint)
PGAC_PATH_PROGS(XSLTPROC, xsltproc)
PGAC_PATH_PROGS(OSX, [osx sgml2xml sx])
PGAC_PATH_PROGS(FOP, fop)
#


@ -37,15 +37,7 @@ ifndef FOP
FOP = $(missing) fop
endif
SGMLINCLUDE = -D . -D $(srcdir)
ifndef NSGMLS
NSGMLS = $(missing) nsgmls
endif
ifndef OSX
OSX = $(missing) osx
endif
XMLINCLUDE = --path .
ifndef XMLLINT
XMLLINT = $(missing) xmllint
@ -63,19 +55,6 @@ GENERATED_SGML = version.sgml \
ALLSGML := $(wildcard $(srcdir)/*.sgml $(srcdir)/ref/*.sgml) $(GENERATED_SGML)
# Enable some extra warnings
# -wfully-tagged needed to throw a warning on missing tags
# for older tool chains, 2007-08-31
# -wnet catches XML-style empty-element tags like <xref linkend="abc"/>.
override SPFLAGS += -wall -wno-unused-param -wfully-tagged -wnet
# Additional warnings for XML compatibility. The conditional is meant
# to detect whether we are using OpenSP rather than the ancient
# original SP.
override SPFLAGS += -wempty
ifneq (,$(filter o%,$(notdir $(OSX))))
override SPFLAGS += -wdata-delim -winstance-ignore-ms -winstance-include-ms -winstance-param-entity
endif
##
## Man pages
@ -83,9 +62,9 @@ endif
man distprep-man: man-stamp
man-stamp: stylesheet-man.xsl postgres.xml
$(XMLLINT) --noout --valid postgres.xml
$(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_MAN_FLAGS) $^
man-stamp: stylesheet-man.xsl postgres.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_MAN_FLAGS) $(wordlist 1,2,$^)
touch $@
@ -136,27 +115,8 @@ INSTALL.html: %.html : stylesheet-text.xsl %.xml
$(XMLLINT) --noout --valid $*.xml
$(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $^ >$@
INSTALL.xml: standalone-profile.xsl standalone-install.xml postgres.xml
$(XSLTPROC) $(XSLTPROCFLAGS) --xinclude $(wordlist 1,2,$^) >$@
##
## SGML->XML conversion
##
# For obscure reasons, GNU make 3.81 complains about circular dependencies
# if we try to do "make all" in a VPATH build without the explicit
# $(srcdir) on the postgres.sgml dependency in this rule. GNU make bug?
postgres.xml: $(srcdir)/postgres.sgml $(ALLSGML)
$(OSX) $(SPFLAGS) $(SGMLINCLUDE) -x lower $< >$@.tmp
$(call mangle-xml,book)
define mangle-xml
$(PERL) -p -e 's/\[(aacute|acirc|aelig|agrave|amp|aring|atilde|auml|bull|copy|eacute|egrave|gt|iacute|lt|mdash|nbsp|ntilde|oacute|ocirc|oslash|ouml|pi|quot|scaron|uuml) *\]/\&\1;/gi;' \
-e '$$_ .= qq{<!DOCTYPE $(1) PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">\n} if $$. == 1;' \
<$@.tmp > $@
rm $@.tmp
endef
INSTALL.xml: standalone-profile.xsl standalone-install.xml postgres.sgml $(ALLSGML)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --xinclude $(wordlist 1,2,$^) >$@
##
@ -169,20 +129,20 @@ endif
html: html-stamp
html-stamp: stylesheet.xsl postgres.xml
$(XMLLINT) --noout --valid postgres.xml
$(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $^
html-stamp: stylesheet.xsl postgres.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) $(wordlist 1,2,$^)
cp $(srcdir)/stylesheet.css html/
touch $@
htmlhelp: stylesheet-hh.xsl postgres.xml
$(XMLLINT) --noout --valid postgres.xml
$(XSLTPROC) $(XSLTPROCFLAGS) $^
htmlhelp: stylesheet-hh.xsl postgres.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(wordlist 1,2,$^)
# single-page HTML
postgres.html: stylesheet-html-nochunk.xsl postgres.xml
$(XMLLINT) --noout --valid postgres.xml
$(XSLTPROC) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) -o $@ $^
postgres.html: stylesheet-html-nochunk.xsl postgres.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) $(XSLTPROC_HTML_FLAGS) -o $@ $(wordlist 1,2,$^)
# single-page text
postgres.txt: postgres.html
@ -196,13 +156,13 @@ postgres.txt: postgres.html
postgres.pdf:
$(error Invalid target; use postgres-A4.pdf or postgres-US.pdf as targets)
%-A4.fo: stylesheet-fo.xsl %.xml
$(XMLLINT) --noout --valid $*.xml
$(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type A4 -o $@ $^
%-A4.fo: stylesheet-fo.xsl %.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --stringparam paper.type A4 -o $@ $(wordlist 1,2,$^)
%-US.fo: stylesheet-fo.xsl %.xml
$(XMLLINT) --noout --valid $*.xml
$(XSLTPROC) $(XSLTPROCFLAGS) --stringparam paper.type USletter -o $@ $^
%-US.fo: stylesheet-fo.xsl %.sgml $(ALLSGML)
$(XMLLINT) $(XMLINCLUDE) --noout --valid $(word 2,$^)
$(XSLTPROC) $(XMLINCLUDE) $(XSLTPROCFLAGS) --stringparam paper.type USletter -o $@ $(wordlist 1,2,$^)
%.pdf: %.fo
$(FOP) -fo $< -pdf $@
@ -213,7 +173,7 @@ postgres.pdf:
##
epub: postgres.epub
postgres.epub: postgres.xml
postgres.epub: postgres.sgml $(ALLSGML)
$(XMLLINT) --noout --valid $<
$(DBTOEPUB) $<
@ -226,7 +186,8 @@ DB2X_TEXIXML = db2x_texixml
DB2X_XSLTPROC = db2x_xsltproc
MAKEINFO = makeinfo
%.texixml: %.xml
%.texixml: %.sgml $(ALLSGML)
$(XMLLINT) --noout --valid $<
$(DB2X_XSLTPROC) -s texi -g output-file=$(basename $@) $< -o $@
%.texi: %.texixml
@ -242,7 +203,7 @@ MAKEINFO = makeinfo
# Quick syntax check without style processing
check: postgres.sgml $(ALLSGML) check-tabs
$(NSGMLS) $(SPFLAGS) $(SGMLINCLUDE) -s $<
$(XMLLINT) $(XMLINCLUDE) --noout --valid $<
##
@ -312,7 +273,7 @@ check-tabs:
# This allows removing some files from the distribution tarballs while
# keeping the dependencies satisfied.
.SECONDARY: postgres.xml $(GENERATED_SGML)
.SECONDARY: $(GENERATED_SGML)
.SECONDARY: INSTALL.html INSTALL.xml
.SECONDARY: postgres-A4.fo postgres-US.fo
@ -326,8 +287,6 @@ clean:
rm -f *.fo *.pdf
# generated SGML files
rm -f $(GENERATED_SGML)
# SGML->XML conversion
rm -f postgres.xml *.tmp
# HTML Help
rm -f htmlhelp.hhp toc.hhc index.hhk
# EPUB


@ -16,9 +16,9 @@
</para>
<para>
The functions shown in <xref linkend="functions-adminpack-table"> provide
The functions shown in <xref linkend="functions-adminpack-table"/> provide
write access to files on the machine hosting the server. (See also the
functions in <xref linkend="functions-admin-genfile-table">, which
functions in <xref linkend="functions-admin-genfile-table"/>, which
provide read-only access.)
Only files within the database cluster directory can be accessed, but
either a relative or absolute path is allowable.
@ -107,18 +107,18 @@
</indexterm>
<para>
<function>pg_logdir_ls</function> returns the start timestamps and path
names of all the log files in the <xref linkend="guc-log-directory">
directory. The <xref linkend="guc-log-filename"> parameter must have its
names of all the log files in the <xref linkend="guc-log-directory"/>
directory. The <xref linkend="guc-log-filename"/> parameter must have its
default setting (<literal>postgresql-%Y-%m-%d_%H%M%S.log</literal>) to use this
function.
</para>
<para>
The functions shown
in <xref linkend="functions-adminpack-deprecated-table"> are deprecated
in <xref linkend="functions-adminpack-deprecated-table"/> are deprecated
and should not be used in new applications; instead use those shown
in <xref linkend="functions-admin-signal-table">
and <xref linkend="functions-admin-genfile-table">. These functions are
in <xref linkend="functions-admin-signal-table"/>
and <xref linkend="functions-admin-genfile-table"/>. These functions are
provided in <filename>adminpack</filename> only for compatibility with old
versions of <application>pgAdmin</application>.
</para>


@ -18,12 +18,12 @@
<para>
This chapter will on occasion refer to examples found in <xref
linkend="tutorial-sql"> to change or improve them, so it will be
linkend="tutorial-sql"/> to change or improve them, so it will be
useful to have read that chapter. Some examples from
this chapter can also be found in
<filename>advanced.sql</filename> in the tutorial directory. This
file also contains some sample data to load, which is not
repeated here. (Refer to <xref linkend="tutorial-sql-intro"> for
repeated here. (Refer to <xref linkend="tutorial-sql-intro"/> for
how to use the file.)
</para>
</sect1>
@ -37,7 +37,7 @@
</indexterm>
<para>
Refer back to the queries in <xref linkend="tutorial-join">.
Refer back to the queries in <xref linkend="tutorial-join"/>.
Suppose the combined listing of weather records and city location
is of particular interest to your application, but you do not want
to type the query each time you need it. You can create a
@ -82,7 +82,7 @@ SELECT * FROM myview;
<para>
Recall the <classname>weather</classname> and
<classname>cities</classname> tables from <xref
linkend="tutorial-sql">. Consider the following problem: You
linkend="tutorial-sql"/>. Consider the following problem: You
want to make sure that no one can insert rows in the
<classname>weather</classname> table that do not have a matching
entry in the <classname>cities</classname> table. This is called
@ -129,7 +129,7 @@ DETAIL: Key (city)=(Berkeley) is not present in table "cities".
<para>
The behavior of foreign keys can be finely tuned to your
application. We will not go beyond this simple example in this
tutorial, but just refer you to <xref linkend="ddl">
tutorial, but just refer you to <xref linkend="ddl"/>
for more information. Making correct use of
foreign keys will definitely improve the quality of your database
applications, so you are strongly encouraged to learn about them.
@ -447,7 +447,7 @@ FROM empsalary;
<para>
There are options to define the window frame in other ways, but
this tutorial does not cover them. See
<xref linkend="syntax-window-functions"> for details.
<xref linkend="syntax-window-functions"/> for details.
</para>
</footnote>
Here is an example using <function>sum</function>:
@ -554,10 +554,10 @@ SELECT sum(salary) OVER w, avg(salary) OVER w
<para>
More details about window functions can be found in
<xref linkend="syntax-window-functions">,
<xref linkend="functions-window">,
<xref linkend="queries-window">, and the
<xref linkend="sql-select"> reference page.
<xref linkend="syntax-window-functions"/>,
<xref linkend="functions-window"/>,
<xref linkend="queries-window"/>, and the
<xref linkend="sql-select"/> reference page.
</para>
</sect1>
@ -692,7 +692,7 @@ SELECT name, altitude
<para>
Although inheritance is frequently useful, it has not been integrated
with unique constraints or foreign keys, which limits its usefulness.
See <xref linkend="ddl-inherit"> for more detail.
See <xref linkend="ddl-inherit"/> for more detail.
</para>
</note>
</sect1>


@ -31,7 +31,7 @@
index scans themselves, which may be user-defined operator class
code. For example, B-Tree index verification relies on comparisons
made with one or more B-Tree support function 1 routines. See <xref
linkend="xindex-support"> for details of operator class support
linkend="xindex-support"/> for details of operator class support
functions.
</para>
<para>
@ -192,7 +192,7 @@ ORDER BY c.relpages DESC LIMIT 10;
index that is ordered using an affected collation, simply because
<emphasis>indexed</emphasis> values might happen to have the same
absolute ordering regardless of the behavioral inconsistency. See
<xref linkend="locale"> and <xref linkend="collation"> for
<xref linkend="locale"/> and <xref linkend="collation"/> for
further details about how <productname>PostgreSQL</productname> uses
operating system locales and collations.
</para>
@ -210,7 +210,7 @@ ORDER BY c.relpages DESC LIMIT 10;
logical inconsistency to be introduced. One obvious testing
strategy is to call <filename>amcheck</filename> functions continuously
when running the standard regression tests. See <xref
linkend="regress-run"> for details on running the tests.
linkend="regress-run"/> for details on running the tests.
</para>
</listitem>
<listitem>
@ -263,7 +263,7 @@ ORDER BY c.relpages DESC LIMIT 10;
There is no general method of repairing problems that
<filename>amcheck</filename> detects. An explanation for the root cause of
an invariant violation should be sought. <xref
linkend="pageinspect"> may play a useful role in diagnosing
linkend="pageinspect"/> may play a useful role in diagnosing
corruption that <filename>amcheck</filename> detects. A <command>REINDEX</command>
may not be effective in repairing corruption.
</para>


@ -7,7 +7,7 @@
<title>Author</title>
<para>
This chapter originated as part of
<xref linkend="sim98">, Stefan Simkovics'
<xref linkend="sim98"/>, Stefan Simkovics'
Master's Thesis prepared at Vienna University of Technology under the direction
of O.Univ.Prof.Dr. Georg Gottlob and Univ.Ass. Mag. Katrin Seyr.
</para>
@ -136,7 +136,7 @@
<para>
The client process can be any program that understands the
<productname>PostgreSQL</productname> protocol described in
<xref linkend="protocol">. Many clients are based on the
<xref linkend="protocol"/>. Many clients are based on the
C-language library <application>libpq</application>, but several independent
implementations of the protocol exist, such as the Java
<application>JDBC</application> driver.
@ -317,7 +317,7 @@
<para>
The query rewriter is discussed in some detail in
<xref linkend="rules">, so there is no need to cover it here.
<xref linkend="rules"/>, so there is no need to cover it here.
We will only point out that both the input and the output of the
rewriter are query trees, that is, there is no change in the
representation or level of semantic detail in the trees. Rewriting
@ -347,8 +347,8 @@
involving large numbers of join operations. In order to determine
a reasonable (not necessarily optimal) query plan in a reasonable amount
of time, <productname>PostgreSQL</productname> uses a <firstterm>Genetic
Query Optimizer</firstterm> (see <xref linkend="geqo">) when the number of joins
exceeds a threshold (see <xref linkend="guc-geqo-threshold">).
Query Optimizer</firstterm> (see <xref linkend="geqo"/>) when the number of joins
exceeds a threshold (see <xref linkend="guc-geqo-threshold"/>).
</para>
</note>
@ -438,7 +438,7 @@
</para>
<para>
If the query uses fewer than <xref linkend="guc-geqo-threshold">
If the query uses fewer than <xref linkend="guc-geqo-threshold"/>
relations, a near-exhaustive search is conducted to find the best
join sequence. The planner preferentially considers joins between any
two relations for which there exist a corresponding join clause in the
@ -454,7 +454,7 @@
<para>
When <varname>geqo_threshold</varname> is exceeded, the join
sequences considered are determined by heuristics, as described
in <xref linkend="geqo">. Otherwise the process is the same.
in <xref linkend="geqo"/>. Otherwise the process is the same.
</para>
<para>


@ -128,7 +128,7 @@ CREATE TABLE tictactoe (
<para>
(These kinds of array constants are actually only a special case of
the generic type constants discussed in <xref
linkend="sql-syntax-constants-generic">. The constant is initially
linkend="sql-syntax-constants-generic"/>. The constant is initially
treated as a string and passed to the array input conversion
routine. An explicit type specification might be necessary.)
</para>
@ -192,7 +192,7 @@ INSERT INTO sal_emp
expressions; for instance, string literals are single quoted, instead of
double quoted as they would be in an array literal. The <literal>ARRAY</literal>
constructor syntax is discussed in more detail in
<xref linkend="sql-syntax-array-constructors">.
<xref linkend="sql-syntax-array-constructors"/>.
</para>
</sect2>
@ -616,7 +616,7 @@ SELECT * FROM sal_emp WHERE pay_by_quarter[1] = 10000 OR
However, this quickly becomes tedious for large arrays, and is not
helpful if the size of the array is unknown. An alternative method is
described in <xref linkend="functions-comparisons">. The above
described in <xref linkend="functions-comparisons"/>. The above
query could be replaced by:
<programlisting>
@ -644,7 +644,7 @@ SELECT * FROM
WHERE pay_by_quarter[s] = 10000;
</programlisting>
This function is described in <xref linkend="functions-srf-subscripts">.
This function is described in <xref linkend="functions-srf-subscripts"/>.
</para>
<para>
@ -657,8 +657,8 @@ SELECT * FROM sal_emp WHERE pay_by_quarter &amp;&amp; ARRAY[10000];
</programlisting>
This and other array operators are further described in
<xref linkend="functions-array">. It can be accelerated by an appropriate
index, as described in <xref linkend="indexes-types">.
<xref linkend="functions-array"/>. It can be accelerated by an appropriate
index, as described in <xref linkend="indexes-types"/>.
</para>
<para>
@ -755,7 +755,7 @@ SELECT f1[1][-2][3] AS e1, f1[1][-1][5] AS e2
or backslashes disables this and allows the literal string value
<quote>NULL</quote> to be entered. Also, for backward compatibility with
pre-8.2 versions of <productname>PostgreSQL</productname>, the <xref
linkend="guc-array-nulls"> configuration parameter can be turned
linkend="guc-array-nulls"/> configuration parameter can be turned
<literal>off</literal> to suppress recognition of <literal>NULL</literal> as a NULL.
</para>
@ -797,7 +797,7 @@ INSERT ... VALUES (E'{"\\\\","\\""}');
with a data type whose input routine also treated backslashes specially,
<type>bytea</type> for example, we might need as many as eight backslashes
in the command to get one backslash into the stored array element.)
Dollar quoting (see <xref linkend="sql-syntax-dollar-quoting">) can be
Dollar quoting (see <xref linkend="sql-syntax-dollar-quoting"/>) can be
used to avoid the need to double backslashes.
</para>
</note>
@ -805,7 +805,7 @@ INSERT ... VALUES (E'{"\\\\","\\""}');
<tip>
<para>
The <literal>ARRAY</literal> constructor syntax (see
<xref linkend="sql-syntax-array-constructors">) is often easier to work
<xref linkend="sql-syntax-array-constructors"/>) is often easier to work
with than the array-literal syntax when writing array values in SQL
commands. In <literal>ARRAY</literal>, individual element values are written the
same way they would be written when not members of an array.


@ -18,7 +18,7 @@
<para>
In order to function, this module must be loaded via
<xref linkend="guc-shared-preload-libraries"> in <filename>postgresql.conf</filename>.
<xref linkend="guc-shared-preload-libraries"/> in <filename>postgresql.conf</filename>.
</para>
<sect2>


@ -10,7 +10,7 @@
<para>
The <filename>auto_explain</filename> module provides a means for
logging execution plans of slow statements automatically, without
having to run <xref linkend="sql-explain">
having to run <xref linkend="sql-explain"/>
by hand. This is especially helpful for tracking down un-optimized queries
in large applications.
</para>
@ -25,8 +25,8 @@ LOAD 'auto_explain';
(You must be superuser to do that.) More typical usage is to preload
it into some or all sessions by including <literal>auto_explain</literal> in
<xref linkend="guc-session-preload-libraries"> or
<xref linkend="guc-shared-preload-libraries"> in
<xref linkend="guc-session-preload-libraries"/> or
<xref linkend="guc-shared-preload-libraries"/> in
<filename>postgresql.conf</filename>. Then you can track unexpectedly slow queries
no matter when they happen. Of course there is a price in overhead for
that.


@ -32,7 +32,7 @@
commands that, when fed back to the server, will recreate the
database in the same state as it was at the time of the dump.
<productname>PostgreSQL</productname> provides the utility program
<xref linkend="app-pgdump"> for this purpose. The basic usage of this
<xref linkend="app-pgdump"/> for this purpose. The basic usage of this
command is:
<synopsis>
pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable class="parameter">outfile</replaceable>
@ -79,7 +79,7 @@ pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable cl
environment variable <envar>PGUSER</envar>. Remember that
<application>pg_dump</application> connections are subject to the normal
client authentication mechanisms (which are described in <xref
linkend="client-authentication">).
linkend="client-authentication"/>).
</para>
<para>
@ -120,9 +120,9 @@ psql <replaceable class="parameter">dbname</replaceable> &lt; <replaceable class
class="parameter">dbname</replaceable></literal>). <application>psql</application>
supports options similar to <application>pg_dump</application> for specifying
the database server to connect to and the user name to use. See
the <xref linkend="app-psql"> reference page for more information.
the <xref linkend="app-psql"/> reference page for more information.
Non-text file dumps are restored using the <xref
linkend="app-pgrestore"> utility.
linkend="app-pgrestore"/> utility.
</para>
<para>
@ -178,13 +178,13 @@ pg_dump -h <replaceable>host1</replaceable> <replaceable>dbname</replaceable> |
<para>
After restoring a backup, it is wise to run <xref
linkend="sql-analyze"> on each
linkend="sql-analyze"/> on each
database so the query optimizer has useful statistics;
see <xref linkend="vacuum-for-statistics">
and <xref linkend="autovacuum"> for more information.
see <xref linkend="vacuum-for-statistics"/>
and <xref linkend="autovacuum"/> for more information.
For more advice on how to load large amounts of data
into <productname>PostgreSQL</productname> efficiently, refer to <xref
linkend="populate">.
linkend="populate"/>.
</para>
</sect2>
@ -196,7 +196,7 @@ pg_dump -h <replaceable>host1</replaceable> <replaceable>dbname</replaceable> |
and it does not dump information about roles or tablespaces
(because those are cluster-wide rather than per-database).
To support convenient dumping of the entire contents of a database
cluster, the <xref linkend="app-pg-dumpall"> program is provided.
cluster, the <xref linkend="app-pg-dumpall"/> program is provided.
<application>pg_dumpall</application> backs up each database in a given
cluster, and also preserves cluster-wide data such as role and
tablespace definitions. The basic usage of this command is:
@ -308,8 +308,8 @@ pg_dump -Fc <replaceable class="parameter">dbname</replaceable> &gt; <replaceabl
pg_restore -d <replaceable class="parameter">dbname</replaceable> <replaceable class="parameter">filename</replaceable>
</programlisting>
See the <xref linkend="app-pgdump"> and <xref
linkend="app-pgrestore"> reference pages for details.
See the <xref linkend="app-pgdump"/> and <xref
linkend="app-pgrestore"/> reference pages for details.
</para>
</formalpara>
@ -345,7 +345,7 @@ pg_dump -j <replaceable class="parameter">num</replaceable> -F d -f <replaceable
<para>
An alternative backup strategy is to directly copy the files that
<productname>PostgreSQL</productname> uses to store the data in the database;
<xref linkend="creating-cluster"> explains where these files
<xref linkend="creating-cluster"/> explains where these files
are located. You can use whatever method you prefer
for doing file system backups; for example:
@ -369,7 +369,7 @@ tar -cf backup.tar /usr/local/pgsql/data
an atomic snapshot of the state of the file system,
but also because of internal buffering within the server).
Information about stopping the server can be found in
<xref linkend="server-shutdown">. Needless to say, you
<xref linkend="server-shutdown"/>. Needless to say, you
also need to shut down the server before restoring the data.
</para>
</listitem>
@ -428,10 +428,10 @@ tar -cf backup.tar /usr/local/pgsql/data
If simultaneous snapshots are not possible, one option is to shut down
the database server long enough to establish all the frozen snapshots.
Another option is to perform a continuous archiving base backup (<xref
linkend="backup-base-backup">) because such backups are immune to file
linkend="backup-base-backup"/>) because such backups are immune to file
system changes during the backup. This requires enabling continuous
archiving just during the backup process; restore is done using
continuous archive recovery (<xref linkend="backup-pitr-recovery">).
continuous archive recovery (<xref linkend="backup-pitr-recovery"/>).
</para>
<para>
@ -591,11 +591,11 @@ tar -cf backup.tar /usr/local/pgsql/data
</para>
<para>
To enable WAL archiving, set the <xref linkend="guc-wal-level">
To enable WAL archiving, set the <xref linkend="guc-wal-level"/>
configuration parameter to <literal>replica</literal> or higher,
<xref linkend="guc-archive-mode"> to <literal>on</literal>,
<xref linkend="guc-archive-mode"/> to <literal>on</literal>,
and specify the shell command to use in the <xref
linkend="guc-archive-command"> configuration parameter. In practice
linkend="guc-archive-command"/> configuration parameter. In practice
these settings will always be placed in the
<filename>postgresql.conf</filename> file.
In <varname>archive_command</varname>,
@ -705,7 +705,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
than through SQL operations.
You might wish to keep the configuration files in a location that will
be backed up by your regular file system backup procedures. See
<xref linkend="runtime-config-file-locations"> for how to relocate the
<xref linkend="runtime-config-file-locations"/> for how to relocate the
configuration files.
</para>
@ -715,7 +715,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
where it does so), there could be a long delay between the completion
of a transaction and its safe recording in archive storage. To put
a limit on how old unarchived data can be, you can set
<xref linkend="guc-archive-timeout"> to force the server to switch
<xref linkend="guc-archive-timeout"/> to force the server to switch
to a new WAL segment file at least that often. Note that archived
files that are archived early due to a forced switch are still the same
length as completely full files. It is therefore unwise to set a very
@ -729,13 +729,13 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<function>pg_switch_wal</function> if you want to ensure that a
just-finished transaction is archived as soon as possible. Other utility
functions related to WAL management are listed in <xref
linkend="functions-admin-backup-table">.
linkend="functions-admin-backup-table"/>.
</para>
<para>
When <varname>wal_level</varname> is <literal>minimal</literal> some SQL commands
are optimized to avoid WAL logging, as described in <xref
linkend="populate-pitr">. If archiving or streaming replication were
linkend="populate-pitr"/>. If archiving or streaming replication were
turned on during execution of one of these statements, WAL would not
contain enough information for archive recovery. (Crash recovery is
unaffected.) For this reason, <varname>wal_level</varname> can only be changed at
@ -753,11 +753,11 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<para>
The easiest way to perform a base backup is to use the
<xref linkend="app-pgbasebackup"> tool. It can create
<xref linkend="app-pgbasebackup"/> tool. It can create
a base backup either as regular files or as a tar archive. If more
flexibility than <xref linkend="app-pgbasebackup"> can provide is
flexibility than <xref linkend="app-pgbasebackup"/> can provide is
required, you can also make a base backup using the low level API
(see <xref linkend="backup-lowlevel-base-backup">).
(see <xref linkend="backup-lowlevel-base-backup"/>).
</para>
<para>
@ -791,7 +791,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<para>
The backup history file is just a small text file. It contains the
label string you gave to <xref linkend="app-pgbasebackup">, as well as
label string you gave to <xref linkend="app-pgbasebackup"/>, as well as
the starting and ending times and WAL segments of the backup.
If you used the label to identify the associated dump file,
then the archived history file is enough to tell you which dump file to
@ -814,7 +814,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
<para>
The procedure for making a base backup using the low level
APIs contains a few more steps than
the <xref linkend="app-pgbasebackup"> method, but is relatively
the <xref linkend="app-pgbasebackup"/> method, but is relatively
simple. It is very important that these steps are executed in
sequence, and that the success of a step is verified before
proceeding to the next step.
@ -830,7 +830,7 @@ test ! -f /mnt/server/archivedir/00000001000000A900000065 &amp;&amp; cp pg_wal/0
A non-exclusive low level backup is one that allows other
concurrent backups to be running (both those started using
the same backup API and those started using
<xref linkend="app-pgbasebackup">).
<xref linkend="app-pgbasebackup"/>).
</para>
<para>
<orderedlist>
@ -859,7 +859,7 @@ SELECT pg_start_backup('label', false, false);
required for the checkpoint will be spread out over a significant
period of time, by default half your inter-checkpoint interval
(see the configuration parameter
<xref linkend="guc-checkpoint-completion-target">). This is
<xref linkend="guc-checkpoint-completion-target"/>). This is
usually what you want, because it minimizes the impact on query
processing. If you want to start the backup as soon as
possible, change the second parameter to <literal>true</literal>, which will
@ -879,7 +879,7 @@ SELECT pg_start_backup('label', false, false);
<application>pg_dumpall</application>). It is neither
necessary nor desirable to stop normal operation of the database
while you do this. See
<xref linkend="backup-lowlevel-base-backup-data"> for things to
<xref linkend="backup-lowlevel-base-backup-data"/> for things to
consider during this backup.
</para>
</listitem>
@ -989,7 +989,7 @@ SELECT pg_start_backup('label');
required for the checkpoint will be spread out over a significant
period of time, by default half your inter-checkpoint interval
(see the configuration parameter
<xref linkend="guc-checkpoint-completion-target">). This is
<xref linkend="guc-checkpoint-completion-target"/>). This is
usually what you want, because it minimizes the impact on query
processing. If you want to start the backup as soon as
possible, use:
@ -1007,7 +1007,7 @@ SELECT pg_start_backup('label', true);
<application>pg_dumpall</application>). It is neither
necessary nor desirable to stop normal operation of the database
while you do this. See
<xref linkend="backup-lowlevel-base-backup-data"> for things to
<xref linkend="backup-lowlevel-base-backup-data"/> for things to
consider during this backup.
</para>
<para>
@ -1119,7 +1119,7 @@ SELECT pg_stop_backup();
<filename>pg_snapshots/</filename>, <filename>pg_stat_tmp/</filename>,
and <filename>pg_subtrans/</filename> (but not the directories themselves) can be
omitted from the backup as they will be initialized on postmaster startup.
If <xref linkend="guc-stats-temp-directory"> is set and is under the data
If <xref linkend="guc-stats-temp-directory"/> is set and is under the data
directory then the contents of that directory can also be omitted.
</para>
@ -1221,7 +1221,7 @@ SELECT pg_stop_backup();
<listitem>
<para>
Create a recovery command file <filename>recovery.conf</filename> in the cluster
data directory (see <xref linkend="recovery-config">). You might
data directory (see <xref linkend="recovery-config"/>). You might
also want to temporarily modify <filename>pg_hba.conf</filename> to prevent
ordinary users from connecting until you are sure the recovery was successful.
</para>
@ -1310,7 +1310,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
at the start of recovery for a file named something like
<filename>00000001.history</filename>. This is also normal and does not
indicate a problem in simple recovery situations; see
<xref linkend="backup-timelines"> for discussion.
<xref linkend="backup-timelines"/> for discussion.
</para>
<para>
@ -1440,7 +1440,7 @@ restore_command = 'cp /mnt/server/archivedir/%f %p'
<para>
As with base backups, the easiest way to produce a standalone
hot backup is to use the <xref linkend="app-pgbasebackup">
hot backup is to use the <xref linkend="app-pgbasebackup"/>
tool. If you include the <literal>-X</literal> parameter when calling
it, all the write-ahead log required to use the backup will be
included in the backup automatically, and no special action is
@ -1548,7 +1548,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
<tip>
<para>
When using an <varname>archive_command</varname> script, it's desirable
to enable <xref linkend="guc-logging-collector">.
to enable <xref linkend="guc-logging-collector"/>.
Any messages written to <systemitem>stderr</systemitem> from the script will then
appear in the database server log, allowing complex configurations to
be diagnosed easily if they fail.
@ -1567,7 +1567,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
<itemizedlist>
<listitem>
<para>
If a <xref linkend="sql-createdatabase">
If a <xref linkend="sql-createdatabase"/>
command is executed while a base backup is being taken, and then
the template database that the <command>CREATE DATABASE</command> copied
is modified while the base backup is still in progress, it is
@ -1580,7 +1580,7 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
<listitem>
<para>
<xref linkend="sql-createtablespace">
<xref linkend="sql-createtablespace"/>
commands are WAL-logged with the literal absolute path, and will
therefore be replayed as tablespace creations with the same
absolute path. This might be undesirable if the log is being
@ -1603,8 +1603,8 @@ archive_command = 'local_backup_script.sh "%p" "%f"'
your system hardware and software, the risk of partial writes might
be small enough to ignore, in which case you can significantly
reduce the total volume of archived logs by turning off page
snapshots using the <xref linkend="guc-full-page-writes">
parameter. (Read the notes and warnings in <xref linkend="wal">
snapshots using the <xref linkend="guc-full-page-writes"/>
parameter. (Read the notes and warnings in <xref linkend="wal"/>
before you do so.) Turning off page snapshots does not prevent
use of the logs for PITR operations. An area for future
development is to compress archived WAL data by removing


@ -286,6 +286,6 @@ typedef struct BackgroundWorker
<para>
The maximum number of registered background workers is limited by
<xref linkend="guc-max-worker-processes">.
<xref linkend="guc-max-worker-processes"/>.
</para>
</chapter>


@ -95,7 +95,7 @@
<para>
The core <productname>PostgreSQL</productname> distribution
includes the <acronym>BRIN</acronym> operator classes shown in
<xref linkend="brin-builtin-opclasses-table">.
<xref linkend="brin-builtin-opclasses-table"/>.
</para>
<para>
@ -590,7 +590,7 @@ typedef struct BrinOpcInfo
To write an operator class for a data type that implements a totally
ordered set, it is possible to use the minmax support procedures
alongside the corresponding operators, as shown in
<xref linkend="brin-extensibility-minmax-table">.
<xref linkend="brin-extensibility-minmax-table"/>.
All operator class members (procedures and operators) are mandatory.
</para>
@ -648,7 +648,7 @@ typedef struct BrinOpcInfo
To write an operator class for a complex data type which has values
included within another type, it's possible to use the inclusion support
procedures alongside the corresponding operators, as shown
in <xref linkend="brin-extensibility-inclusion-table">. It requires
in <xref linkend="brin-extensibility-inclusion-table"/>. It requires
only a single additional function, which can be written in any language.
More functions can be defined for additional functionality. All operators
are optional. Some operators require other operators, as shown as
@ -821,7 +821,7 @@ typedef struct BrinOpcInfo
additional data types to be supported by defining extra sets
of operators. Inclusion operator class operator strategies are dependent
on another operator strategy as shown in
<xref linkend="brin-extensibility-inclusion-table">, or the same
<xref linkend="brin-extensibility-inclusion-table"/>, or the same
operator strategy as themselves. They require the dependency
operator to be defined with the <literal>STORAGE</literal> data type as the
left-hand-side argument and the other supported data type to be the


@ -27,7 +27,7 @@
<title>Overview</title>
<para>
<xref linkend="catalog-table"> lists the system catalogs.
<xref linkend="catalog-table"/> lists the system catalogs.
More detailed documentation of each catalog follows below.
</para>
@ -567,8 +567,8 @@
<para>
New aggregate functions are registered with the <xref
linkend="sql-createaggregate">
command. See <xref linkend="xaggr"> for more information about
linkend="sql-createaggregate"/>
command. See <xref linkend="xaggr"/> for more information about
writing aggregate functions and the meaning of the transition
functions, etc.
</para>
@ -588,7 +588,7 @@
relation access methods. There is one row for each access method supported
by the system.
Currently, only indexes have access methods. The requirements for index
access methods are discussed in detail in <xref linkend="indexam">.
access methods are discussed in detail in <xref linkend="indexam"/>.
</para>
<table>
@ -649,7 +649,7 @@
methods. That data is now only directly visible at the C code level.
However, <function>pg_index_column_has_property()</function> and related
functions have been added to allow SQL queries to inspect index access
method properties; see <xref linkend="functions-info-catalog-table">.
method properties; see <xref linkend="functions-info-catalog-table"/>.
</para>
</note>
@ -1034,7 +1034,7 @@
<entry>
<structfield>attstattarget</structfield> controls the level of detail
of statistics accumulated for this column by
<xref linkend="sql-analyze">.
<xref linkend="sql-analyze"/>.
A zero value indicates that no statistics should be collected.
A negative value says to use the system default statistics target.
The exact meaning of positive values is data type-dependent.
@ -1270,7 +1270,7 @@
</para>
<para>
<xref linkend="user-manag"> contains detailed information about user and
<xref linkend="user-manag"/> contains detailed information about user and
privilege management.
</para>
@ -1356,7 +1356,7 @@
<entry><type>bool</type></entry>
<entry>
Role bypasses every row level security policy, see
<xref linkend="ddl-rowsecurity"> for more information.
<xref linkend="ddl-rowsecurity"/> for more information.
</entry>
</row>
@ -1964,8 +1964,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -2015,7 +2015,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
The catalog <structname>pg_collation</structname> describes the
available collations, which are essentially mappings from an SQL
name to operating system locale categories.
See <xref linkend="collation"> for more information.
See <xref linkend="collation"/> for more information.
</para>
<table>
@ -2424,7 +2424,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_conversion</structname> describes
encoding conversion procedures. See <xref linkend="sql-createconversion">
encoding conversion procedures. See <xref linkend="sql-createconversion"/>
for more information.
</para>
@ -2516,8 +2516,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_database</structname> stores information about
the available databases. Databases are created with the <xref
linkend="sql-createdatabase"> command.
Consult <xref linkend="managing-databases"> for details about the meaning
linkend="sql-createdatabase"/> command.
Consult <xref linkend="managing-databases"/> for details about the meaning
of some of the parameters.
</para>
@ -2675,8 +2675,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -3053,7 +3053,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_description</structname> stores optional descriptions
(comments) for each database object. Descriptions can be manipulated
with the <xref linkend="sql-comment"> command and viewed with
with the <xref linkend="sql-comment"/> command and viewed with
<application>psql</application>'s <literal>\d</literal> commands.
Descriptions of many built-in system objects are provided in the initial
contents of <structname>pg_description</structname>.
@ -3208,7 +3208,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_event_trigger</structname> stores event triggers.
See <xref linkend="event-triggers"> for more information.
See <xref linkend="event-triggers"/> for more information.
</para>
<table>
@ -3258,7 +3258,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry><type>char</type></entry>
<entry></entry>
<entry>
Controls in which <xref linkend="guc-session-replication-role"> modes
Controls in which <xref linkend="guc-session-replication-role"/> modes
the event trigger fires.
<literal>O</literal> = trigger fires in <quote>origin</quote> and <quote>local</quote> modes,
<literal>D</literal> = trigger is disabled,
@ -3291,7 +3291,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_extension</structname> stores information
about the installed extensions. See <xref linkend="extend-extensions">
about the installed extensions. See <xref linkend="extend-extensions"/>
for details about extensions.
</para>
@ -3463,8 +3463,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -3559,8 +3559,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -4011,8 +4011,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
The initial access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -4034,8 +4034,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_language</structname> registers
languages in which you can write functions or stored procedures.
See <xref linkend="sql-createlanguage">
and <xref linkend="xplang"> for more information about language handlers.
See <xref linkend="sql-createlanguage"/>
and <xref linkend="xplang"/> for more information about language handlers.
</para>
<table>
@ -4117,7 +4117,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry>
This references a function that is responsible for executing
<quote>inline</quote> anonymous code blocks
(<xref linkend="sql-do"> blocks).
(<xref linkend="sql-do"/> blocks).
Zero if inline blocks are not supported.
</entry>
</row>
@ -4139,8 +4139,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -4279,8 +4279,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -4346,8 +4346,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -4377,7 +4377,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
Operator classes are described at length in <xref linkend="xindex">.
Operator classes are described at length in <xref linkend="xindex"/>.
</para>
<table>
@ -4481,8 +4481,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_operator</structname> stores information about operators.
See <xref linkend="sql-createoperator">
and <xref linkend="xoper"> for more information.
See <xref linkend="sql-createoperator"/>
and <xref linkend="xoper"/> for more information.
</para>
<table>
@ -4639,7 +4639,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
Operator families are described at length in <xref linkend="xindex">.
Operator families are described at length in <xref linkend="xindex"/>.
</para>
<table>
@ -5040,8 +5040,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_proc</structname> stores information about functions (or procedures).
See <xref linkend="sql-createfunction">
and <xref linkend="xfunc"> for more information.
See <xref linkend="sql-createfunction"/>
and <xref linkend="xfunc"/> for more information.
</para>
<para>
@ -5106,7 +5106,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry><type>float4</type></entry>
<entry></entry>
<entry>Estimated execution cost (in units of
<xref linkend="guc-cpu-operator-cost">); if <structfield>proretset</structfield>,
<xref linkend="guc-cpu-operator-cost"/>); if <structfield>proretset</structfield>,
this is cost per row returned</entry>
</row>
@ -5130,7 +5130,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry><type>regproc</type></entry>
<entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
<entry>Calls to this function can be simplified by this other function
(see <xref linkend="xfunc-transform-functions">)</entry>
(see <xref linkend="xfunc-transform-functions"/>)</entry>
</row>
<row>
@ -5359,8 +5359,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -5390,7 +5390,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_publication</structname> contains all
publications created in the database. For more on publications see
<xref linkend="logical-replication-publication">.
<xref linkend="logical-replication-publication"/>.
</para>
<table>
@ -5475,7 +5475,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_publication_rel</structname> contains the
mapping between relations and publications in the database. This is a
many-to-many mapping. See also <xref linkend="view-pg-publication-tables">
many-to-many mapping. See also <xref linkend="view-pg-publication-tables"/>
for a more user-friendly view of this information.
</para>
@ -5605,7 +5605,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The <structname>pg_replication_origin</structname> catalog contains
all replication origins created. For more on replication origins
see <xref linkend="replication-origins">.
see <xref linkend="replication-origins"/>.
</para>
<table>
@ -5705,7 +5705,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry><type>char</type></entry>
<entry></entry>
<entry>
Controls in which <xref linkend="guc-session-replication-role"> modes
Controls in which <xref linkend="guc-session-replication-role"/> modes
the rule fires.
<literal>O</literal> = rule fires in <quote>origin</quote> and <quote>local</quote> modes,
<literal>D</literal> = rule is disabled,
@ -5765,8 +5765,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_seclabel</structname> stores security
labels on database objects. Security labels can be manipulated
with the <xref linkend="sql-security-label"> command. For an easier
way to view security labels, see <xref linkend="view-pg-seclabels">.
with the <xref linkend="sql-security-label"/> command. For an easier
way to view security labels, see <xref linkend="view-pg-seclabels"/>.
</para>
<para>
@ -6093,7 +6093,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_shdescription</structname> stores optional
descriptions (comments) for shared database objects. Descriptions can be
manipulated with the <xref linkend="sql-comment"> command and viewed with
manipulated with the <xref linkend="sql-comment"/> command and viewed with
<application>psql</application>'s <literal>\d</literal> commands.
</para>
@ -6160,8 +6160,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_shseclabel</structname> stores security
labels on shared database objects. Security labels can be manipulated
with the <xref linkend="sql-security-label"> command. For an easier
way to view security labels, see <xref linkend="view-pg-seclabels">.
with the <xref linkend="sql-security-label"/> command. For an easier
way to view security labels, see <xref linkend="view-pg-seclabels"/>.
</para>
<para>
@ -6228,7 +6228,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_statistic</structname> stores
statistical data about the contents of the database. Entries are
created by <xref linkend="sql-analyze">
created by <xref linkend="sql-analyze"/>
and subsequently used by the query planner. Note that all the
statistical data is inherently approximate, even assuming that it
is up-to-date.
@ -6408,7 +6408,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
The catalog <structname>pg_statistic_ext</structname>
holds extended planner statistics.
Each row in this catalog corresponds to a <firstterm>statistics object</firstterm>
created with <xref linkend="sql-createstatistics">.
created with <xref linkend="sql-createstatistics"/>.
</para>
<table>
@ -6521,7 +6521,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_subscription</structname> contains all existing
logical replication subscriptions. For more information about logical
replication see <xref linkend="logical-replication">.
replication see <xref linkend="logical-replication"/>.
</para>
<para>
@ -6616,7 +6616,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>Array of subscribed publication names. These reference the
publications on the publisher server. For more on publications
see <xref linkend="logical-replication-publication">.
see <xref linkend="logical-replication-publication"/>.
</entry>
</row>
</tbody>
@ -6758,8 +6758,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -6788,7 +6788,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_transform</structname> stores information about
transforms, which are a mechanism to adapt data types to procedural
languages. See <xref linkend="sql-createtransform"> for more information.
languages. See <xref linkend="sql-createtransform"/> for more information.
</para>
<table>
@ -6856,7 +6856,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_trigger</structname> stores triggers on tables
and views.
See <xref linkend="sql-createtrigger">
See <xref linkend="sql-createtrigger"/>
for more information.
</para>
@ -6914,7 +6914,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry><type>char</type></entry>
<entry></entry>
<entry>
Controls in which <xref linkend="guc-session-replication-role"> modes
Controls in which <xref linkend="guc-session-replication-role"/> modes
the trigger fires.
<literal>O</literal> = trigger fires in <quote>origin</quote> and <quote>local</quote> modes,
<literal>D</literal> = trigger is disabled,
@ -7066,7 +7066,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
<productname>PostgreSQL</productname>'s text search features are
described at length in <xref linkend="textsearch">.
described at length in <xref linkend="textsearch"/>.
</para>
<table>
@ -7141,7 +7141,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
<productname>PostgreSQL</productname>'s text search features are
described at length in <xref linkend="textsearch">.
described at length in <xref linkend="textsearch"/>.
</para>
<table>
@ -7212,7 +7212,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
<productname>PostgreSQL</productname>'s text search features are
described at length in <xref linkend="textsearch">.
described at length in <xref linkend="textsearch"/>.
</para>
<table>
@ -7295,7 +7295,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
<productname>PostgreSQL</productname>'s text search features are
described at length in <xref linkend="textsearch">.
described at length in <xref linkend="textsearch"/>.
</para>
<table>
@ -7392,7 +7392,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
<productname>PostgreSQL</productname>'s text search features are
described at length in <xref linkend="textsearch">.
described at length in <xref linkend="textsearch"/>.
</para>
<table>
@ -7461,9 +7461,9 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The catalog <structname>pg_type</structname> stores information about data
types. Base types and enum types (scalar types) are created with
<xref linkend="sql-createtype">, and
<xref linkend="sql-createtype"/>, and
domains with
<xref linkend="sql-createdomain">.
<xref linkend="sql-createdomain"/>.
A composite type is automatically created for each table in the database, to
represent the row structure of the table. It is also possible to create
composite types with <command>CREATE TYPE AS</command>.
@ -7567,7 +7567,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<structfield>typcategory</structfield> is an arbitrary classification
of data types that is used by the parser to determine which implicit
casts should be <quote>preferred</quote>.
See <xref linkend="catalog-typcategory-table">.
See <xref linkend="catalog-typcategory-table"/>.
</entry>
</row>
@ -7871,8 +7871,8 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry></entry>
<entry>
Access privileges; see
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>
for details
</entry>
</row>
@ -7881,7 +7881,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</table>
<para>
<xref linkend="catalog-typcategory-table"> lists the system-defined values
<xref linkend="catalog-typcategory-table"/> lists the system-defined values
of <structfield>typcategory</structfield>. Any future additions to this list will
also be upper-case ASCII letters. All other ASCII characters are reserved
for user-defined categories.
@ -8043,7 +8043,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
The information schema (<xref linkend="information-schema">) provides
The information schema (<xref linkend="information-schema"/>) provides
an alternative set of views which overlap the functionality of the system
views. Since the information schema is SQL-standard whereas the views
described here are <productname>PostgreSQL</productname>-specific,
@ -8052,11 +8052,11 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
<xref linkend="view-table"> lists the system views described here.
<xref linkend="view-table"/> lists the system views described here.
More detailed documentation of each view follows below.
There are some additional views that provide access to the results of
the statistics collector; they are described in <xref
linkend="monitoring-stats-views-table">.
linkend="monitoring-stats-views-table"/>.
</para>
<para>
@ -8389,7 +8389,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
be used by software packages that want to interface to
<productname>PostgreSQL</productname> to facilitate finding the required header
files and libraries. It provides the same basic information as the
<xref linkend="app-pgconfig"> <productname>PostgreSQL</productname> client
<xref linkend="app-pgconfig"/> <productname>PostgreSQL</productname> client
application.
</para>
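<para>
For illustration only (this example is not part of the original text), the
view can be queried like any other relation; the two entry names shown are
merely common values that appear in its output:
</para>
<programlisting>
-- inspect selected build-time settings through the pg_config view
SELECT name, setting
FROM pg_config
WHERE name IN ('INCLUDEDIR', 'LIBDIR');
</programlisting>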
@ -8440,7 +8440,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<itemizedlist>
<listitem>
<para>
via the <xref linkend="sql-declare">
via the <xref linkend="sql-declare"/>
statement in SQL
</para>
</listitem>
@ -8448,14 +8448,14 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<listitem>
<para>
via the Bind message in the frontend/backend protocol, as
described in <xref linkend="protocol-flow-ext-query">
described in <xref linkend="protocol-flow-ext-query"/>
</para>
</listitem>
<listitem>
<para>
via the Server Programming Interface (SPI), as described in
<xref linkend="spi-interface">
<xref linkend="spi-interface"/>
</para>
</listitem>
</itemizedlist>
@ -8648,7 +8648,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
See <xref linkend="config-setting"> for more information about the various
See <xref linkend="config-setting"/> for more information about the various
ways to change run-time parameters.
</para>
@ -8813,7 +8813,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
See <xref linkend="client-authentication"> for more information about
See <xref linkend="client-authentication"/> for more information about
client authentication configuration.
</para>
</sect1>
@ -8890,7 +8890,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<para>
The view <structname>pg_locks</structname> provides access to
information about the locks held by active processes within the
database server. See <xref linkend="mvcc"> for more discussion
database server. See <xref linkend="mvcc"/> for more discussion
of locking.
</para>
@ -9053,7 +9053,7 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<entry><type>text</type></entry>
<entry></entry>
<entry>Name of the lock mode held or desired by this process (see <xref
linkend="locking-tables"> and <xref linkend="xact-serializable">)</entry>
linkend="locking-tables"/> and <xref linkend="xact-serializable"/>)</entry>
</row>
<row>
<entry><structfield>granted</structfield></entry>
@ -9164,7 +9164,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
queues, nor information about which processes are parallel workers running
on behalf of which other client sessions. It is better to use
the <function>pg_blocking_pids()</function> function
(see <xref linkend="functions-info-session-table">) to identify which
(see <xref linkend="functions-info-session-table"/>) to identify which
process(es) a waiting process is blocked behind.
</para>
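<para>
As a hedged illustration (not part of the original text), a query of roughly
this shape lists the sessions that are currently blocked together with the
process IDs blocking them:
</para>
<programlisting>
-- waiting sessions and the processes they are waiting for
SELECT pid,
       pg_blocking_pids(pid) AS blocked_by,
       wait_event_type,
       query
FROM pg_stat_activity
WHERE cardinality(pg_blocking_pids(pid)) &gt; 0;
</programlisting>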
@ -9369,7 +9369,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
The <structname>pg_prepared_statements</structname> view displays
all the prepared statements that are available in the current
session. See <xref linkend="sql-prepare"> for more information about prepared
session. See <xref linkend="sql-prepare"/> for more information about prepared
statements.
</para>
@ -9377,7 +9377,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<structname>pg_prepared_statements</structname> contains one row
for each prepared statement. Rows are added to the view when a new
prepared statement is created and removed when a prepared statement
is released (for example, via the <xref linkend="sql-deallocate"> command).
is released (for example, via the <xref linkend="sql-deallocate"/> command).
</para>
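<para>
For illustration (not part of the original text), the lifecycle described
above can be observed within a single session; the statement name
<literal>stmt</literal> is arbitrary:
</para>
<programlisting>
PREPARE stmt (text) AS
    SELECT relname FROM pg_class WHERE relname = $1;   -- adds a row to the view
SELECT name, statement, parameter_types
FROM pg_prepared_statements;
DEALLOCATE stmt;                                       -- removes the row again
</programlisting>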
<table>
@ -9457,7 +9457,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
The view <structname>pg_prepared_xacts</structname> displays
information about transactions that are currently prepared for two-phase
commit (see <xref linkend="sql-prepare-transaction"> for details).
commit (see <xref linkend="sql-prepare-transaction"/> for details).
</para>
<para>
@ -9601,7 +9601,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
The <structname>pg_replication_origin_status</structname> view
contains information about how far replay for a certain origin has
progressed. For more on replication origins
see <xref linkend="replication-origins">.
see <xref linkend="replication-origins"/>.
</para>
<table>
@ -9670,7 +9670,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
For more on replication slots,
see <xref linkend="streaming-replication-slots"> and <xref linkend="logicaldecoding">.
see <xref linkend="streaming-replication-slots"/> and <xref linkend="logicaldecoding"/>.
</para>
<table>
@ -9917,7 +9917,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry></entry>
<entry>
Role bypasses every row level security policy, see
<xref linkend="ddl-rowsecurity"> for more information.
<xref linkend="ddl-rowsecurity"/> for more information.
</entry>
</row>
@ -10203,8 +10203,8 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
The view <structname>pg_settings</structname> provides access to
run-time parameters of the server. It is essentially an alternative
interface to the <xref linkend="sql-show">
and <xref linkend="sql-set"> commands.
interface to the <xref linkend="sql-show"/>
and <xref linkend="sql-set"/> commands.
It also provides access to some facts about each parameter that are
not directly available from <command>SHOW</command>, such as minimum and
maximum values.
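<para>
As an illustrative sketch (not in the original text), the extra facts
mentioned above can be retrieved directly; the two parameter names are
arbitrary examples:
</para>
<programlisting>
SELECT name, setting, unit, min_val, max_val, context
FROM pg_settings
WHERE name IN ('shared_buffers', 'work_mem');
</programlisting>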
@ -10441,7 +10441,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
</variablelist>
<para>
See <xref linkend="config-setting"> for more information about the various
See <xref linkend="config-setting"/> for more information about the various
ways to change these parameters.
</para>
@ -10449,7 +10449,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
The <structname>pg_settings</structname> view cannot be inserted into or
deleted from, but it can be updated. An <command>UPDATE</command> applied
to a row of <structname>pg_settings</structname> is equivalent to executing
the <xref linkend="sql-set"> command on that named
the <xref linkend="sql-set"/> command on that named
parameter. The change only affects the value used by the current
session. If an <command>UPDATE</command> is issued within a transaction
that is later aborted, the effects of the <command>UPDATE</command> command
@ -10543,7 +10543,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry></entry>
<entry>
User bypasses every row level security policy, see
<xref linkend="ddl-rowsecurity"> for more information.
<xref linkend="ddl-rowsecurity"/> for more information.
</entry>
</row>
@ -10763,7 +10763,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
The maximum number of entries in the array fields can be controlled on a
column-by-column basis using the <command>ALTER TABLE SET STATISTICS</command>
command, or globally by setting the
<xref linkend="guc-default-statistics-target"> run-time parameter.
<xref linkend="guc-default-statistics-target"/> run-time parameter.
</para>
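<para>
For illustration (not part of the original text), both ways of adjusting the
statistics target might look like this; the table and column names
<literal>orders</literal> and <literal>customer_id</literal> are hypothetical:
</para>
<programlisting>
-- raise the statistics target for a single column
ALTER TABLE orders ALTER COLUMN customer_id SET STATISTICS 500;
-- or raise the default for the current session
SET default_statistics_target = 500;
-- repopulate pg_statistic using the new target
ANALYZE orders;
</programlisting>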
</sect1>
@ -10858,7 +10858,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
The view <structname>pg_timezone_abbrevs</structname> provides a list
of time zone abbreviations that are currently recognized by the datetime
input routines. The contents of this view change when the
<xref linkend="guc-timezone-abbreviations"> run-time parameter is modified.
<xref linkend="guc-timezone-abbreviations"/> run-time parameter is modified.
</para>
<table>
@ -10895,7 +10895,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<para>
While most timezone abbreviations represent fixed offsets from UTC,
there are some that have historically varied in value
(see <xref linkend="datetime-config-files"> for more information).
(see <xref linkend="datetime-config-files"/> for more information).
In such cases this view presents their current meaning.
</para>
@ -11025,7 +11025,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<entry><type>bool</type></entry>
<entry>
User bypasses every row level security policy, see
<xref linkend="ddl-rowsecurity"> for more information.
<xref linkend="ddl-rowsecurity"/> for more information.
</entry>
</row>

View File

@ -15,8 +15,8 @@
Using the locale features of the operating system to provide
locale-specific collation order, number formatting, translated
messages, and other aspects.
This is covered in <xref linkend="locale"> and
<xref linkend="collation">.
This is covered in <xref linkend="locale"/> and
<xref linkend="collation"/>.
</para>
</listitem>
@ -25,7 +25,7 @@
Providing a number of different character sets to support storing text
in all kinds of languages, and providing character set translation
between client and server.
This is covered in <xref linkend="multibyte">.
This is covered in <xref linkend="multibyte"/>.
</para>
</listitem>
</itemizedlist>
@ -146,7 +146,7 @@ initdb --locale=sv_SE
the sort order of indexes, so they must be kept fixed, or indexes on
text columns would become corrupt.
(But you can alleviate this restriction using collations, as discussed
in <xref linkend="collation">.)
in <xref linkend="collation"/>.)
The default values for these
categories are determined when <command>initdb</command> is run, and
those values are used when new databases are created, unless
@ -157,7 +157,7 @@ initdb --locale=sv_SE
The other locale categories can be changed whenever desired
by setting the server configuration parameters
that have the same name as the locale categories (see <xref
linkend="runtime-config-client-format"> for details). The values
linkend="runtime-config-client-format"/> for details). The values
that are chosen by <command>initdb</command> are actually only written
into the configuration file <filename>postgresql.conf</filename> to
serve as defaults when the server is started. If you remove these
@ -267,10 +267,10 @@ initdb --locale=sv_SE
with <literal>LIKE</literal> clauses under a non-C locale, several custom
operator classes exist. These allow the creation of an index that
performs a strict character-by-character comparison, ignoring
locale comparison rules. Refer to <xref linkend="indexes-opclass">
locale comparison rules. Refer to <xref linkend="indexes-opclass"/>
for more information. Another approach is to create indexes using
the <literal>C</literal> collation, as discussed in
<xref linkend="collation">.
<xref linkend="collation"/>.
</para>
</sect2>
@ -316,7 +316,7 @@ initdb --locale=sv_SE
<productname>PostgreSQL</productname> speak their preferred language well.
If messages in your language are currently not available or not fully
translated, your assistance would be appreciated. If you want to
help, refer to <xref linkend="nls"> or write to the developers'
help, refer to <xref linkend="nls"/> or write to the developers'
mailing list.
</para>
</sect2>
@ -524,7 +524,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
these under one concept than to create another infrastructure for
setting <symbol>LC_CTYPE</symbol> per expression.) Also,
a <literal>libc</literal> collation
is tied to a character set encoding (see <xref linkend="multibyte">).
is tied to a character set encoding (see <xref linkend="multibyte"/>).
The same collation name may exist for different encodings.
</para>
@ -605,7 +605,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
for <symbol>LC_COLLATE</symbol> and <symbol>LC_CTYPE</symbol>, or if new
locales are installed in the operating system after the database system
was initialized, then a new collation may be created using
the <xref linkend="sql-createcollation"> command.
the <xref linkend="sql-createcollation"/> command.
New operating system locales can also be imported en masse using
the <link linkend="functions-admin-collation"><function>pg_import_system_collations()</function></link> function.
</para>
@ -702,7 +702,7 @@ SELECT a COLLATE "C" &lt; b COLLATE "POSIX" FROM test1;
<para>
If the standard and predefined collations are not sufficient, users can
create their own collation objects using the SQL
command <xref linkend="sql-createcollation">.
command <xref linkend="sql-createcollation"/>.
</para>
<para>
@ -730,7 +730,7 @@ CREATE COLLATION german (provider = libc, locale = 'de_DE');
defined in the operating system when the database instance is
initialized, it is not often necessary to manually create new ones.
Reasons might be if a different naming system is desired (in which case
see also <xref linkend="collation-copy">) or if the operating system has
see also <xref linkend="collation-copy"/>) or if the operating system has
been upgraded to provide new locale definitions (in which case see
also <link linkend="functions-admin-collation"><function>pg_import_system_collations()</function></link>).
</para>
@ -871,7 +871,7 @@ CREATE COLLATION german (provider = libc, locale = 'de_DE');
<title>Copying Collations</title>
<para>
The command <xref linkend="sql-createcollation"> can also be used to
The command <xref linkend="sql-createcollation"/> can also be used to
create a new collation from an existing collation, which can be useful to
be able to use operating-system-independent collation names in
applications, create compatibility names, or use an ICU-provided collation
@ -924,7 +924,7 @@ CREATE COLLATION french FROM "fr-x-icu";
<title>Supported Character Sets</title>
<para>
<xref linkend="charset-table"> shows the character sets available
<xref linkend="charset-table"/> shows the character sets available
for use in <productname>PostgreSQL</productname>.
</para>
@ -1392,7 +1392,7 @@ CREATE DATABASE korean WITH ENCODING 'EUC_KR' LC_COLLATE='ko_KR.euckr' LC_CTYPE=
database. When copying any other database, the encoding and locale
settings cannot be changed from those of the source database, because
that might result in corrupt data. For more information see
<xref linkend="manage-ag-templatedbs">.
<xref linkend="manage-ag-templatedbs"/>.
</para>
<para>
@ -1449,7 +1449,7 @@ $ <userinput>psql -l</userinput>
character set combinations. The conversion information is stored in the
<literal>pg_conversion</literal> system catalog. <productname>PostgreSQL</productname>
comes with some predefined conversions, as shown in <xref
linkend="multibyte-translation-table">. You can create a new
linkend="multibyte-translation-table"/>. You can create a new
conversion using the SQL command <command>CREATE CONVERSION</command>.
</para>
@ -1763,7 +1763,7 @@ $ <userinput>psql -l</userinput>
<listitem>
<para>
<application>libpq</application> (<xref linkend="libpq-control">) has functions to control the client encoding.
<application>libpq</application> (<xref linkend="libpq-control"/>) has functions to control the client encoding.
</para>
</listitem>
@ -1812,7 +1812,7 @@ RESET client_encoding;
<listitem>
<para>
Using the configuration variable <xref
linkend="guc-client-encoding">. If the
linkend="guc-client-encoding"/>. If the
<varname>client_encoding</varname> variable is set, that client
encoding is automatically selected when a connection to the
server is made. (This can subsequently be overridden using any

View File

@ -13,13 +13,13 @@
wants to connect as, much the same way one logs into a Unix computer
as a particular user. Within the SQL environment the active database
user name determines access privileges to database objects &mdash; see
<xref linkend="user-manag"> for more information. Therefore, it is
<xref linkend="user-manag"/> for more information. Therefore, it is
essential to restrict which database users can connect.
</para>
<note>
<para>
As explained in <xref linkend="user-manag">,
As explained in <xref linkend="user-manag"/>,
<productname>PostgreSQL</productname> actually does privilege
management in terms of <quote>roles</quote>. In this chapter, we
consistently use <firstterm>database user</firstterm> to mean <quote>role with the
@ -70,7 +70,7 @@
<filename>pg_hba.conf</filename> file is installed when the data
directory is initialized by <command>initdb</command>. It is
possible to place the authentication configuration file elsewhere,
however; see the <xref linkend="guc-hba-file"> configuration parameter.
however; see the <xref linkend="guc-hba-file"/> configuration parameter.
</para>
<para>
@ -136,7 +136,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
Remote TCP/IP connections will not be possible unless
the server is started with an appropriate value for the
<xref linkend="guc-listen-addresses"> configuration parameter,
<xref linkend="guc-listen-addresses"/> configuration parameter,
since the default behavior is to listen for TCP/IP connections
only on the local loopback address <literal>localhost</literal>.
</para>
@ -157,8 +157,8 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
To make use of this option the server must be built with
<acronym>SSL</acronym> support. Furthermore,
<acronym>SSL</acronym> must be enabled
by setting the <xref linkend="guc-ssl"> configuration parameter (see
<xref linkend="ssl-tcp"> for more information).
by setting the <xref linkend="guc-ssl"/> configuration parameter (see
<xref linkend="ssl-tcp"/> for more information).
Otherwise, the <literal>hostssl</literal> record is ignored except for
logging a warning that it cannot match any connections.
</para>
@ -381,7 +381,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
Specifies the authentication method to use when a connection matches
this record. The possible choices are summarized here; details
are in <xref linkend="auth-methods">.
are in <xref linkend="auth-methods"/>.
<variablelist>
<varlistentry>
@ -393,7 +393,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<productname>PostgreSQL</productname> database server to login as
any <productname>PostgreSQL</productname> user they wish,
without the need for a password or any other authentication. See <xref
linkend="auth-trust"> for details.
linkend="auth-trust"/> for details.
</para>
</listitem>
</varlistentry>
@ -416,7 +416,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<listitem>
<para>
Perform SCRAM-SHA-256 authentication to verify the user's
password. See <xref linkend="auth-password"> for details.
password. See <xref linkend="auth-password"/> for details.
</para>
</listitem>
</varlistentry>
@ -426,7 +426,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<listitem>
<para>
Perform SCRAM-SHA-256 or MD5 authentication to verify the
user's password. See <xref linkend="auth-password">
user's password. See <xref linkend="auth-password"/>
for details.
</para>
</listitem>
@ -440,7 +440,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
authentication.
Since the password is sent in clear text over the
network, this should not be used on untrusted networks.
See <xref linkend="auth-password"> for details.
See <xref linkend="auth-password"/> for details.
</para>
</listitem>
</varlistentry>
@ -451,7 +451,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
Use GSSAPI to authenticate the user. This is only
available for TCP/IP connections. See <xref
linkend="gssapi-auth"> for details.
linkend="gssapi-auth"/> for details.
</para>
</listitem>
</varlistentry>
@ -462,7 +462,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
Use SSPI to authenticate the user. This is only
available on Windows. See <xref
linkend="sspi-auth"> for details.
linkend="sspi-auth"/> for details.
</para>
</listitem>
</varlistentry>
@ -477,7 +477,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
Ident authentication can only be used on TCP/IP
connections. When specified for local connections, peer
authentication will be used instead.
See <xref linkend="auth-ident"> for details.
See <xref linkend="auth-ident"/> for details.
</para>
</listitem>
</varlistentry>
@ -489,7 +489,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
Obtain the client's operating system user name from the operating
system and check if it matches the requested database user name.
This is only available for local connections.
See <xref linkend="auth-peer"> for details.
See <xref linkend="auth-peer"/> for details.
</para>
</listitem>
</varlistentry>
@ -499,7 +499,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<listitem>
<para>
Authenticate using an <acronym>LDAP</acronym> server. See <xref
linkend="auth-ldap"> for details.
linkend="auth-ldap"/> for details.
</para>
</listitem>
</varlistentry>
@ -509,7 +509,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<listitem>
<para>
Authenticate using a RADIUS server. See <xref
linkend="auth-radius"> for details.
linkend="auth-radius"/> for details.
</para>
</listitem>
</varlistentry>
@ -519,7 +519,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<listitem>
<para>
Authenticate using SSL client certificates. See
<xref linkend="auth-cert"> for details.
<xref linkend="auth-cert"/> for details.
</para>
</listitem>
</varlistentry>
@ -530,7 +530,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
Authenticate using the Pluggable Authentication Modules
(PAM) service provided by the operating system. See <xref
linkend="auth-pam"> for details.
linkend="auth-pam"/> for details.
</para>
</listitem>
</varlistentry>
@ -540,7 +540,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<listitem>
<para>
Authenticate using the BSD Authentication service provided by the
operating system. See <xref linkend="auth-bsd"> for details.
operating system. See <xref linkend="auth-bsd"/> for details.
</para>
</listitem>
</varlistentry>
@ -638,7 +638,7 @@ hostnossl <replaceable>database</replaceable> <replaceable>user</replaceable>
<para>
Some examples of <filename>pg_hba.conf</filename> entries are shown in
<xref linkend="example-pg-hba.conf">. See the next section for details on the
<xref linkend="example-pg-hba.conf"/>. See the next section for details on the
different authentication methods.
</para>
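<para>
As a hedged sketch (not taken from the example table referenced above),
typical <filename>pg_hba.conf</filename> entries have the following shape;
the database <literal>mydb</literal> and user <literal>alice</literal> are
hypothetical:
</para>
<programlisting>
# TYPE    DATABASE  USER   ADDRESS         METHOD
local     all       all                    peer
host      all       all    127.0.0.1/32    scram-sha-256
hostssl   mydb      alice  192.168.0.0/24  md5
</programlisting>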
@ -763,7 +763,7 @@ local db1,db2,@demodbs all md5
<filename>pg_ident.conf</filename><indexterm><primary>pg_ident.conf</primary></indexterm>
and is stored in the
cluster's data directory. (It is possible to place the map file
elsewhere, however; see the <xref linkend="guc-ident-file">
elsewhere, however; see the <xref linkend="guc-ident-file"/>
configuration parameter.)
The ident map file contains lines of the general form:
<synopsis>
@ -790,7 +790,7 @@ local db1,db2,@demodbs all md5
<para>
If the <replaceable>system-username</replaceable> field starts with a slash (<literal>/</literal>),
the remainder of the field is treated as a regular expression.
(See <xref linkend="posix-syntax-details"> for details of
(See <xref linkend="posix-syntax-details"/> for details of
<productname>PostgreSQL</productname>'s regular expression syntax.) The regular
expression can include a single capture, or parenthesized subexpression,
which can then be referenced in the <replaceable>database-username</replaceable>
@ -828,8 +828,8 @@ mymap /^(.*)@otherdomain\.com$ guest
<para>
A <filename>pg_ident.conf</filename> file that could be used in
conjunction with the <filename>pg_hba.conf</filename> file in <xref
linkend="example-pg-hba.conf"> is shown in <xref
linkend="example-pg-ident.conf">. In this example, anyone
linkend="example-pg-hba.conf"/> is shown in <xref
linkend="example-pg-ident.conf"/>. In this example, anyone
logged in to a machine on the 192.168 network that does not have the
operating system user name <literal>bryanh</literal>, <literal>ann</literal>, or
<literal>robert</literal> would not be granted access. Unix user
@ -885,7 +885,7 @@ omicron bryanh guest1
Unix-domain socket file using file-system permissions. To do this, set the
<varname>unix_socket_permissions</varname> (and possibly
<varname>unix_socket_group</varname>) configuration parameters as
described in <xref linkend="runtime-config-connection">. Or you
described in <xref linkend="runtime-config-connection"/>. Or you
could set the <varname>unix_socket_directories</varname>
configuration parameter to place the socket file in a suitably
restricted directory.
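<para>
For illustration only (the values are assumptions, not recommendations), the
socket-related parameters mentioned above are set in
<filename>postgresql.conf</filename> and take effect at server start:
</para>
<programlisting>
# postgresql.conf
unix_socket_directories = '/var/run/postgresql-private'
unix_socket_group = 'pgusers'
unix_socket_permissions = 0770
</programlisting>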
@ -965,7 +965,7 @@ omicron bryanh guest1
<para>
The <literal>md5</literal> method cannot be used with
the <xref linkend="guc-db-user-namespace"> feature.
the <xref linkend="guc-db-user-namespace"/> feature.
</para>
<para>
@ -998,8 +998,8 @@ omicron bryanh guest1
separate from operating system user passwords. The password for
each database user is stored in the <literal>pg_authid</literal> system
catalog. Passwords can be managed with the SQL commands
<xref linkend="sql-createrole"> and
<xref linkend="sql-alterrole">,
<xref linkend="sql-createrole"/> and
<xref linkend="sql-alterrole"/>,
e.g., <userinput>CREATE ROLE foo WITH LOGIN PASSWORD 'secret'</userinput>,
or the <application>psql</application>
command <literal>\password</literal>.
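<para>
As an illustrative sketch (not part of the original text), password
management for the example role <literal>foo</literal> might proceed as
follows; the password strings are placeholders:
</para>
<programlisting>
CREATE ROLE foo WITH LOGIN PASSWORD 'secret';
-- choose how passwords set from now on are stored
SET password_encryption = 'scram-sha-256';
ALTER ROLE foo WITH PASSWORD 'another-secret';
</programlisting>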
@ -1011,7 +1011,7 @@ omicron bryanh guest1
The availability of the different password-based authentication methods
depends on how a user's password on the server is encrypted (or hashed,
more accurately). This is controlled by the configuration
parameter <xref linkend="guc-password-encryption"> at the time the
parameter <xref linkend="guc-password-encryption"/> at the time the
password is set. If a password was encrypted using
the <literal>scram-sha-256</literal> setting, then it can be used for the
authentication methods <literal>scram-sha-256</literal>
@ -1061,7 +1061,7 @@ omicron bryanh guest1
<para>
GSSAPI support has to be enabled when <productname>PostgreSQL</productname> is built;
see <xref linkend="installation"> for more information.
see <xref linkend="installation"/> for more information.
</para>
<para>
@ -1072,7 +1072,7 @@ omicron bryanh guest1
The PostgreSQL server will accept any principal that is included in the keytab used by
the server, but care needs to be taken to specify the correct principal details when
making the connection from the client using the <literal>krbsrvname</literal> connection parameter. (See
also <xref linkend="libpq-paramkeywords">.) The installation default can be
also <xref linkend="libpq-paramkeywords"/>.) The installation default can be
changed from the default <literal>postgres</literal> at build time using
<literal>./configure --with-krb-srvnam=</literal><replaceable>whatever</replaceable>.
In most environments,
@ -1112,9 +1112,9 @@ omicron bryanh guest1
<para>
Make sure that your server keytab file is readable (and preferably
only readable, not writable) by the <productname>PostgreSQL</productname>
server account. (See also <xref linkend="postgres-user">.) The location
server account. (See also <xref linkend="postgres-user"/>.) The location
of the key file is specified by the <xref
linkend="guc-krb-server-keyfile"> configuration
linkend="guc-krb-server-keyfile"/> configuration
parameter. The default is
<filename>/usr/local/pgsql/etc/krb5.keytab</filename> (or whatever
directory was specified as <varname>sysconfdir</varname> at build time).
@ -1138,7 +1138,7 @@ omicron bryanh guest1
database user name <literal>fred</literal>, principal
<literal>fred@EXAMPLE.COM</literal> would be able to connect. To also allow
principal <literal>fred/users.example.com@EXAMPLE.COM</literal>, use a user name
map, as described in <xref linkend="auth-username-maps">.
map, as described in <xref linkend="auth-username-maps"/>.
</para>
<para>
@ -1150,7 +1150,7 @@ omicron bryanh guest1
<para>
If set to 0, the realm name from the authenticated user principal is
stripped off before being passed through the user name mapping
(<xref linkend="auth-username-maps">). This is discouraged and is
(<xref linkend="auth-username-maps"/>). This is discouraged and is
primarily available for backwards compatibility, as it is not secure
in multi-realm environments unless <literal>krb_realm</literal> is
also used. It is recommended to
@ -1166,7 +1166,7 @@ omicron bryanh guest1
<listitem>
<para>
Allows for mapping between system and database user names. See
<xref linkend="auth-username-maps"> for details. For a GSSAPI/Kerberos
<xref linkend="auth-username-maps"/> for details. For a GSSAPI/Kerberos
principal, such as <literal>username@EXAMPLE.COM</literal> (or, less
commonly, <literal>username/hostbased@EXAMPLE.COM</literal>), the
user name used for mapping is
@ -1217,7 +1217,7 @@ omicron bryanh guest1
<para>
When using <productname>Kerberos</productname> authentication,
<productname>SSPI</productname> works the same way
<productname>GSSAPI</productname> does; see <xref linkend="gssapi-auth">
<productname>GSSAPI</productname> does; see <xref linkend="gssapi-auth"/>
for details.
</para>
@ -1231,7 +1231,7 @@ omicron bryanh guest1
<para>
If set to 0, the realm name from the authenticated user principal is
stripped off before being passed through the user name mapping
(<xref linkend="auth-username-maps">). This is discouraged and is
(<xref linkend="auth-username-maps"/>). This is discouraged and is
primarily available for backwards compatibility, as it is not secure
in multi-realm environments unless <literal>krb_realm</literal> is
also used. It is recommended to
@ -1284,7 +1284,7 @@ omicron bryanh guest1
<listitem>
<para>
Allows for mapping between system and database user names. See
<xref linkend="auth-username-maps"> for details. For a SSPI/Kerberos
<xref linkend="auth-username-maps"/> for details. For a SSPI/Kerberos
principal, such as <literal>username@EXAMPLE.COM</literal> (or, less
commonly, <literal>username/hostbased@EXAMPLE.COM</literal>), the
user name used for mapping is
@ -1329,7 +1329,7 @@ omicron bryanh guest1
<note>
<para>
When ident is specified for a local (non-TCP/IP) connection,
peer authentication (see <xref linkend="auth-peer">) will be
peer authentication (see <xref linkend="auth-peer"/>) will be
used instead.
</para>
</note>
@ -1342,7 +1342,7 @@ omicron bryanh guest1
<listitem>
<para>
Allows for mapping between system and database user names. See
<xref linkend="auth-username-maps"> for details.
<xref linkend="auth-username-maps"/> for details.
</para>
</listitem>
</varlistentry>
@ -1415,7 +1415,7 @@ omicron bryanh guest1
<listitem>
<para>
Allows for mapping between system and database user names. See
<xref linkend="auth-username-maps"> for details.
<xref linkend="auth-username-maps"/> for details.
</para>
</listitem>
</varlistentry>
@ -1828,7 +1828,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse
<listitem>
<para>
Allows for mapping between system and database user names. See
<xref linkend="auth-username-maps"> for details.
<xref linkend="auth-username-maps"/> for details.
</para>
</listitem>
</varlistentry>

File diff suppressed because it is too large

View File

@ -16,14 +16,14 @@
<para>
This appendix covers extensions and other server plug-in modules found in
<literal>contrib</literal>. <xref linkend="contrib-prog"> covers utility
<literal>contrib</literal>. <xref linkend="contrib-prog"/> covers utility
programs.
</para>
<para>
When building from the source distribution, these components are not built
automatically, unless you build the "world" target
(see <xref linkend="build">).
(see <xref linkend="build"/>).
You can build and install all of them by running:
<screen>
<userinput>make</userinput>
@ -55,7 +55,7 @@
To make use of one of these modules, after you have installed the code
you need to register the new SQL objects in the database system.
In <productname>PostgreSQL</productname> 9.1 and later, this is done by executing
a <xref linkend="sql-createextension"> command. In a fresh database,
a <xref linkend="sql-createextension"/> command. In a fresh database,
you can simply do
<programlisting>
@ -89,16 +89,16 @@ CREATE EXTENSION <replaceable>module_name</replaceable> FROM unpackaged;
This will update the pre-9.1 objects of the module into a proper
<firstterm>extension</firstterm> object. Future updates to the module will be
managed by <xref linkend="sql-alterextension">.
managed by <xref linkend="sql-alterextension"/>.
For more information about extension updates, see
<xref linkend="extend-extensions">.
<xref linkend="extend-extensions"/>.
</para>
<para>
Note, however, that some of these modules are not <quote>extensions</quote>
in this sense, but are loaded into the server in some other way, for instance
by way of
<xref linkend="guc-shared-preload-libraries">. See the documentation of each
<xref linkend="guc-shared-preload-libraries"/>. See the documentation of each
module for details.
</para>
@ -163,7 +163,7 @@ pages.
<para>
This appendix and the previous one contain information regarding the modules that
can be found in the <literal>contrib</literal> directory of the
<productname>PostgreSQL</productname> distribution. See <xref linkend="contrib"> for
<productname>PostgreSQL</productname> distribution. See <xref linkend="contrib"/> for
more information about the <literal>contrib</literal> section in general and
server extensions and plug-ins found in <literal>contrib</literal>
specifically.
@ -184,7 +184,7 @@ pages.
This section covers <productname>PostgreSQL</productname> client
applications in <literal>contrib</literal>. They can be run from anywhere,
independent of where the database server resides. See
also <xref linkend="reference-client"> for information about client
also <xref linkend="reference-client"/> for information about client
applications that are part of the core <productname>PostgreSQL</productname>
distribution.
</para>
@ -200,7 +200,7 @@ pages.
This section covers <productname>PostgreSQL</productname> server-related
applications in <literal>contrib</literal>. They are typically run on the
host where the database server resides. See also <xref
linkend="reference-server"> for information about server applications that
linkend="reference-server"/> for information about server applications that
are part of the core <productname>PostgreSQL</productname> distribution.
</para>

View File

@ -16,7 +16,7 @@
<title>Syntax</title>
<para>
<xref linkend="cube-repr-table"> shows the valid external
<xref linkend="cube-repr-table"/> shows the valid external
representations for the <type>cube</type>
type. <replaceable>x</replaceable>, <replaceable>y</replaceable>, etc. denote
floating-point numbers.
@ -106,7 +106,7 @@
<title>Usage</title>
<para>
<xref linkend="cube-operators-table"> shows the operators provided for
<xref linkend="cube-operators-table"/> shows the operators provided for
type <type>cube</type>.
</para>
@ -268,7 +268,7 @@ SELECT c FROM test ORDER BY c ~&gt; 3 DESC LIMIT 5;
</para>
<para>
<xref linkend="cube-functions-table"> shows the available functions.
<xref linkend="cube-functions-table"/> shows the available functions.
</para>
<table id="cube-functions-table">

View File

@ -123,7 +123,7 @@ Plan *(*PlanCustomPath) (PlannerInfo *root,
</programlisting>
Convert a custom path to a finished plan. The return value will generally
be a <literal>CustomScan</literal> object, which the callback must allocate and
initialize. See <xref linkend="custom-scan-plan"> for more details.
initialize. See <xref linkend="custom-scan-plan"/> for more details.
</para>
</sect2>
</sect1>

View File

@ -16,11 +16,11 @@
<productname>PostgreSQL</productname> has a rich set of native data
types available to users. Users can add new types to
<productname>PostgreSQL</productname> using the <xref
linkend="sql-createtype"> command.
linkend="sql-createtype"/> command.
</para>
<para>
<xref linkend="datatype-table"> shows all the built-in general-purpose data
<xref linkend="datatype-table"/> shows all the built-in general-purpose data
types. Most of the alternative names listed in the
<quote>Aliases</quote> column are the names used internally by
<productname>PostgreSQL</productname> for historical reasons. In
@ -336,7 +336,7 @@
<para>
Numeric types consist of two-, four-, and eight-byte integers,
four- and eight-byte floating-point numbers, and selectable-precision
decimals. <xref linkend="datatype-numeric-table"> lists the
decimals. <xref linkend="datatype-numeric-table"/> lists the
available types.
</para>
@ -424,9 +424,9 @@
<para>
The syntax of constants for the numeric types is described in
<xref linkend="sql-syntax-constants">. The numeric types have a
<xref linkend="sql-syntax-constants"/>. The numeric types have a
full set of corresponding arithmetic operators and
functions. Refer to <xref linkend="functions"> for more
functions. Refer to <xref linkend="functions"/> for more
information. The following sections describe the types in detail.
</para>
@ -559,7 +559,7 @@ NUMERIC
The maximum allowed precision when explicitly specified in the
type declaration is 1000; <type>NUMERIC</type> without a specified
precision is subject to the limits described in <xref
linkend="datatype-numeric-table">.
linkend="datatype-numeric-table"/>.
</para>
</note>
@ -728,7 +728,7 @@ FROM generate_series(-3.5, 3.5, 1) as x;
<note>
<para>
The <xref linkend="guc-extra-float-digits"> setting controls the
The <xref linkend="guc-extra-float-digits"/> setting controls the
number of extra significant digits included when a floating point
value is converted to text for output. With the default value of
<literal>0</literal>, the output is the same on every platform
@ -841,7 +841,7 @@ FROM generate_series(-3.5, 3.5, 1) as x;
<para>
This section describes a PostgreSQL-specific way to create an
autoincrementing column. Another way is to use the SQL-standard
identity column feature, described at <xref linkend="sql-createtable">.
identity column feature, described at <xref linkend="sql-createtable"/>.
</para>
</note>
@ -888,7 +888,7 @@ ALTER SEQUENCE <replaceable class="parameter">tablename</replaceable>_<replaceab
from the sequence is still "used up" even if a row containing that
value is never successfully inserted into the table column. This
may happen, for example, if the inserting transaction rolls back.
See <literal>nextval()</literal> in <xref linkend="functions-sequence">
See <literal>nextval()</literal> in <xref linkend="functions-sequence"/>
for details.
</para>
</note>
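<para>
For illustration (not in the original text), the behavior described in this
note can be reproduced with a hypothetical table <literal>items</literal>:
</para>
<programlisting>
CREATE TABLE items (id serial PRIMARY KEY, name text);
BEGIN;
INSERT INTO items (name) VALUES ('widget');   -- consumes a sequence value
ROLLBACK;                                     -- the consumed value is not handed back
INSERT INTO items (name) VALUES ('gadget');   -- continues with the next value, leaving a gap
</programlisting>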
@ -929,8 +929,8 @@ ALTER SEQUENCE <replaceable class="parameter">tablename</replaceable>_<replaceab
<para>
The <type>money</type> type stores a currency amount with a fixed
fractional precision; see <xref
linkend="datatype-money-table">. The fractional precision is
determined by the database's <xref linkend="guc-lc-monetary"> setting.
linkend="datatype-money-table"/>. The fractional precision is
determined by the database's <xref linkend="guc-lc-monetary"/> setting.
The range shown in the table assumes there are two fractional digits.
Input is accepted in a variety of formats, including integer and
floating-point literals, as well as typical
@ -1063,7 +1063,7 @@ SELECT '52093.89'::money::numeric::float8;
</table>
<para>
<xref linkend="datatype-character-table"> shows the
<xref linkend="datatype-character-table"/> shows the
general-purpose character types available in
<productname>PostgreSQL</productname>.
</para>
@ -1166,12 +1166,12 @@ SELECT '52093.89'::money::numeric::float8;
</tip>
<para>
Refer to <xref linkend="sql-syntax-strings"> for information about
the syntax of string literals, and to <xref linkend="functions">
Refer to <xref linkend="sql-syntax-strings"/> for information about
the syntax of string literals, and to <xref linkend="functions"/>
for information about available operators and functions. The
database character set determines the character set used to store
textual values; for more information on character set support,
refer to <xref linkend="multibyte">.
refer to <xref linkend="multibyte"/>.
</para>
<example>
@ -1180,7 +1180,7 @@ SELECT '52093.89'::money::numeric::float8;
<programlisting>
CREATE TABLE test1 (a character(4));
INSERT INTO test1 VALUES ('ok');
SELECT a, char_length(a) FROM test1; -- <co id="co.datatype-char">
SELECT a, char_length(a) FROM test1; -- <co id="co.datatype-char"/>
<computeroutput>
a | char_length
------+-------------
@ -1206,7 +1206,7 @@ SELECT b, char_length(b) FROM test2;
<callout arearefs="co.datatype-char">
<para>
The <function>char_length</function> function is discussed in
<xref linkend="functions-string">.
<xref linkend="functions-string"/>.
</para>
</callout>
</calloutlist>
@ -1215,7 +1215,7 @@ SELECT b, char_length(b) FROM test2;
<para>
There are two other fixed-length character types in
<productname>PostgreSQL</productname>, shown in <xref
linkend="datatype-character-special-table">. The <type>name</type>
linkend="datatype-character-special-table"/>. The <type>name</type>
type exists <emphasis>only</emphasis> for the storage of identifiers
in the internal system catalogs and is not intended for use by the general user. Its
length is currently defined as 64 bytes (63 usable characters plus
@ -1269,7 +1269,7 @@ SELECT b, char_length(b) FROM test2;
<para>
The <type>bytea</type> data type allows storage of binary strings;
see <xref linkend="datatype-binary-table">.
see <xref linkend="datatype-binary-table"/>.
</para>
<table id="datatype-binary-table">
@ -1313,7 +1313,7 @@ SELECT b, char_length(b) FROM test2;
input and output: <productname>PostgreSQL</productname>'s historical
<quote>escape</quote> format, and <quote>hex</quote> format. Both
of these are always accepted on input. The output format depends
on the configuration parameter <xref linkend="guc-bytea-output">;
on the configuration parameter <xref linkend="guc-bytea-output"/>;
the default is hex. (Note that the hex format was introduced in
<productname>PostgreSQL</productname> 9.0; earlier versions and some
tools don't understand it.)
@ -1384,7 +1384,7 @@ SELECT E'\\xDEADBEEF';
literal using escape string syntax).
Backslash itself (octet value 92) can alternatively be represented by
double backslashes.
<xref linkend="datatype-binary-sqlesc">
<xref linkend="datatype-binary-sqlesc"/>
shows the characters that must be escaped, and gives the alternative
escape sequences where applicable.
</para>
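<para>
As a hedged illustration (not part of the original text), the entries for the
zero octet and the backslash look like this when written with escape string
syntax:
</para>
<programlisting>
SELECT E'\\000'::bytea AS zero_octet,     -- a single octet with value 0
       E'\\\\'::bytea  AS one_backslash;  -- a single backslash octet
</programlisting>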
@ -1443,14 +1443,14 @@ SELECT E'\\xDEADBEEF';
The requirement to escape <emphasis>non-printable</emphasis> octets
varies depending on locale settings. In some instances you can get away
with leaving them unescaped. Note that the result in each of the examples
in <xref linkend="datatype-binary-sqlesc"> was exactly one octet in
in <xref linkend="datatype-binary-sqlesc"/> was exactly one octet in
length, even though the output representation is sometimes
more than one character.
</para>
<para>
The reason multiple backslashes are required, as shown
in <xref linkend="datatype-binary-sqlesc">, is that an input
in <xref linkend="datatype-binary-sqlesc"/>, is that an input
string written as a string literal must pass through two parse
phases in the <productname>PostgreSQL</productname> server.
The first backslash of each pair is interpreted as an escape
@ -1467,7 +1467,7 @@ SELECT E'\\xDEADBEEF';
to a single octet with a decimal value of 1. Note that the
single-quote character is not treated specially by <type>bytea</type>,
so it follows the normal rules for string literals. (See also
<xref linkend="sql-syntax-strings">.)
<xref linkend="sql-syntax-strings"/>.)
</para>
<para>
@ -1477,7 +1477,7 @@ SELECT E'\\xDEADBEEF';
Most <quote>printable</quote> octets are represented by their standard
representation in the client character set. The octet with decimal
value 92 (backslash) is doubled in the output.
Details are in <xref linkend="datatype-binary-resesc">.
Details are in <xref linkend="datatype-binary-resesc"/>.
</para>
<table id="datatype-binary-resesc">
@ -1571,12 +1571,12 @@ SELECT E'\\xDEADBEEF';
<para>
<productname>PostgreSQL</productname> supports the full set of
<acronym>SQL</acronym> date and time types, shown in <xref
linkend="datatype-datetime-table">. The operations available
linkend="datatype-datetime-table"/>. The operations available
on these data types are described in
<xref linkend="functions-datetime">.
<xref linkend="functions-datetime"/>.
Dates are counted according to the Gregorian calendar, even in
years before that calendar was introduced (see <xref
linkend="datetime-units-history"> for more information).
linkend="datetime-units-history"/> for more information).
</para>
<table id="datatype-datetime-table">
@ -1716,7 +1716,7 @@ MINUTE TO SECOND
traditional <productname>POSTGRES</productname>, and others.
For some formats, ordering of day, month, and year in date input is
ambiguous and there is support for specifying the expected
ordering of these fields. Set the <xref linkend="guc-datestyle"> parameter
ordering of these fields. Set the <xref linkend="guc-datestyle"/> parameter
to <literal>MDY</literal> to select month-day-year interpretation,
<literal>DMY</literal> to select day-month-year interpretation, or
<literal>YMD</literal> to select year-month-day interpretation.
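<para>
For illustration (not in the original text), the same ambiguous literal is
read differently depending on the field-order setting:
</para>
<programlisting>
SET datestyle TO 'ISO, DMY';
SELECT '8/1/1999'::date;    -- 8 January 1999 under day-month-year
SET datestyle TO 'ISO, MDY';
SELECT '8/1/1999'::date;    -- 1 August 1999 under month-day-year
</programlisting>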
@ -1726,7 +1726,7 @@ MINUTE TO SECOND
<productname>PostgreSQL</productname> is more flexible in
handling date/time input than the
<acronym>SQL</acronym> standard requires.
See <xref linkend="datetime-appendix">
See <xref linkend="datetime-appendix"/>
for the exact parsing rules of date/time input and for the
recognized text fields including months, days of the week, and
time zones.
@ -1735,7 +1735,7 @@ MINUTE TO SECOND
<para>
Remember that any date or time literal input needs to be enclosed
in single quotes, like text strings. Refer to
<xref linkend="sql-syntax-constants-generic"> for more
<xref linkend="sql-syntax-constants-generic"/> for more
information.
<acronym>SQL</acronym> requires the following syntax
<synopsis>
@ -1759,7 +1759,7 @@ MINUTE TO SECOND
</indexterm>
<para>
<xref linkend="datatype-datetime-date-table"> shows some possible
<xref linkend="datatype-datetime-date-table"/> shows some possible
inputs for the <type>date</type> type.
</para>
@ -1872,8 +1872,8 @@ MINUTE TO SECOND
<para>
Valid input for these types consists of a time of day followed
by an optional time zone. (See <xref
linkend="datatype-datetime-time-table">
and <xref linkend="datatype-timezone-table">.) If a time zone is
linkend="datatype-datetime-time-table"/>
and <xref linkend="datatype-timezone-table"/>.) If a time zone is
specified in the input for <type>time without time zone</type>,
it is silently ignored. You can also specify a date but it will
be ignored, except when you use a time zone name that involves a
@ -1993,7 +1993,7 @@ MINUTE TO SECOND
</table>
<para>
Refer to <xref linkend="datatype-timezones"> for more information on how
Refer to <xref linkend="datatype-timezones"/> for more information on how
to specify time zones.
</para>
</sect3>
@ -2074,7 +2074,7 @@ January 8 04:05:06 1999 PST
time zone specified is converted to UTC using the appropriate offset
for that time zone. If no time zone is stated in the input string,
then it is assumed to be in the time zone indicated by the system's
<xref linkend="guc-timezone"> parameter, and is converted to UTC using the
<xref linkend="guc-timezone"/> parameter, and is converted to UTC using the
offset for the <varname>timezone</varname> zone.
</para>
@ -2084,7 +2084,7 @@ January 8 04:05:06 1999 PST
current <varname>timezone</varname> zone, and displayed as local time in that
zone. To see the time in another time zone, either change
<varname>timezone</varname> or use the <literal>AT TIME ZONE</literal> construct
(see <xref linkend="functions-datetime-zoneconvert">).
(see <xref linkend="functions-datetime-zoneconvert"/>).
</para>
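A small sketch of both approaches, using IANA zone names as elsewhere in this section:
<programlisting>
SET timezone TO 'America/New_York';
SELECT timestamptz '2017-11-23 12:00:00+00';                      -- shown in New York local time
SELECT timestamptz '2017-11-23 12:00:00+00' AT TIME ZONE 'UTC';   -- shown as UTC, without a zone
</programlisting>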
<para>
@ -2112,7 +2112,7 @@ January 8 04:05:06 1999 PST
<para>
<productname>PostgreSQL</productname> supports several
special date/time input values for convenience, as shown in <xref
linkend="datatype-datetime-special-table">. The values
linkend="datatype-datetime-special-table"/>. The values
<literal>infinity</literal> and <literal>-infinity</literal>
are specially represented inside the system and will be displayed
unchanged; but the others are simply notational shorthands
@ -2186,7 +2186,7 @@ January 8 04:05:06 1999 PST
<literal>CURRENT_TIMESTAMP</literal>, <literal>LOCALTIME</literal>,
<literal>LOCALTIMESTAMP</literal>. The latter four accept an
optional subsecond precision specification. (See <xref
linkend="functions-datetime-current">.) Note that these are
linkend="functions-datetime-current"/>.) Note that these are
SQL functions and are <emphasis>not</emphasis> recognized in data input strings.
</para>
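For example (the precision argument is optional, as noted above):
<programlisting>
SELECT CURRENT_DATE, CURRENT_TIMESTAMP(0), LOCALTIMESTAMP(0);
SELECT CURRENT_TIME, LOCALTIME;
</programlisting>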
@ -2218,7 +2218,7 @@ January 8 04:05:06 1999 PST
<acronym>SQL</acronym> standard requires the use of the ISO 8601
format. The name of the <quote>SQL</quote> output format is a
historical accident.) <xref
linkend="datatype-datetime-output-table"> shows examples of each
linkend="datatype-datetime-output-table"/> shows examples of each
output style. The output of the <type>date</type> and
<type>time</type> types is generally only the date or time part
in accordance with the given examples. However, the
@ -2275,9 +2275,9 @@ January 8 04:05:06 1999 PST
In the <acronym>SQL</acronym> and POSTGRES styles, day appears before
month if DMY field ordering has been specified, otherwise month appears
before day.
(See <xref linkend="datatype-datetime-input">
(See <xref linkend="datatype-datetime-input"/>
for how this setting also affects interpretation of input values.)
<xref linkend="datatype-datetime-output2-table"> shows examples.
<xref linkend="datatype-datetime-output2-table"/> shows examples.
</para>
<table id="datatype-datetime-output2-table">
@ -2313,7 +2313,7 @@ January 8 04:05:06 1999 PST
<para>
The date/time style can be selected by the user using the
<command>SET datestyle</command> command, the <xref
linkend="guc-datestyle"> parameter in the
linkend="guc-datestyle"/> parameter in the
<filename>postgresql.conf</filename> configuration file, or the
<envar>PGDATESTYLE</envar> environment variable on the server or
client.
@ -2321,7 +2321,7 @@ January 8 04:05:06 1999 PST
<para>
The formatting function <function>to_char</function>
(see <xref linkend="functions-formatting">) is also available as
(see <xref linkend="functions-formatting"/>) is also available as
a more flexible way to format date/time output.
</para>
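A brief to_char sketch; the format patterns shown are examples, not a complete list:
<programlisting>
SELECT to_char(now(), 'Day, DD Month YYYY HH24:MI:SS TZ');
SELECT to_char(date '2017-11-23', 'DDD "days into" YYYY');
</programlisting>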
</sect2>
@ -2391,7 +2391,7 @@ January 8 04:05:06 1999 PST
<para>
All timezone-aware dates and times are stored internally in
<acronym>UTC</acronym>. They are converted to local time
in the zone specified by the <xref linkend="guc-timezone"> configuration
in the zone specified by the <xref linkend="guc-timezone"/> configuration
parameter before being displayed to the client.
</para>
@ -2404,7 +2404,7 @@ January 8 04:05:06 1999 PST
A full time zone name, for example <literal>America/New_York</literal>.
The recognized time zone names are listed in the
<literal>pg_timezone_names</literal> view (see <xref
linkend="view-pg-timezone-names">).
linkend="view-pg-timezone-names"/>).
<productname>PostgreSQL</productname> uses the widely-used IANA
time zone data for this purpose, so the same time zone
names are also recognized by much other software.
@ -2417,9 +2417,9 @@ January 8 04:05:06 1999 PST
contrast to full time zone names which can imply a set of daylight
savings transition-date rules as well. The recognized abbreviations
are listed in the <literal>pg_timezone_abbrevs</literal> view (see <xref
linkend="view-pg-timezone-abbrevs">). You cannot set the
configuration parameters <xref linkend="guc-timezone"> or
<xref linkend="guc-log-timezone"> to a time
linkend="view-pg-timezone-abbrevs"/>). You cannot set the
configuration parameters <xref linkend="guc-timezone"/> or
<xref linkend="guc-log-timezone"/> to a time
zone abbreviation, but you can use abbreviations in
date/time input values and with the <literal>AT TIME ZONE</literal>
operator.
@ -2499,13 +2499,13 @@ January 8 04:05:06 1999 PST
they are obtained from configuration files stored under
<filename>.../share/timezone/</filename> and <filename>.../share/timezonesets/</filename>
of the installation directory
(see <xref linkend="datetime-config-files">).
(see <xref linkend="datetime-config-files"/>).
</para>
<para>
The <xref linkend="guc-timezone"> configuration parameter can
The <xref linkend="guc-timezone"/> configuration parameter can
be set in the file <filename>postgresql.conf</filename>, or in any of the
other standard ways described in <xref linkend="runtime-config">.
other standard ways described in <xref linkend="runtime-config"/>.
There are also some special ways to set it:
<itemizedlist>
@ -2556,7 +2556,7 @@ January 8 04:05:06 1999 PST
of the different units are implicitly added with appropriate
sign accounting. <literal>ago</literal> negates all the fields.
This syntax is also used for interval output, if
<xref linkend="guc-intervalstyle"> is set to
<xref linkend="guc-intervalstyle"/> is set to
<literal>postgres_verbose</literal>.
</para>
@ -2582,7 +2582,7 @@ P <replaceable>quantity</replaceable> <replaceable>unit</replaceable> <optional>
The string must start with a <literal>P</literal>, and may include a
<literal>T</literal> that introduces the time-of-day units. The
available unit abbreviations are given in <xref
linkend="datatype-interval-iso8601-units">. Units may be
linkend="datatype-interval-iso8601-units"/>. Units may be
omitted, and may be specified in any order, but units smaller than
a day must appear after <literal>T</literal>. In particular, the meaning of
<literal>M</literal> depends on whether it is before or after
@ -2696,7 +2696,7 @@ P <optional> <replaceable>years</replaceable>-<replaceable>months</replaceable>-
</para>
<para>
<xref linkend="datatype-interval-input-examples"> shows some examples
<xref linkend="datatype-interval-input-examples"/> shows some examples
of valid <type>interval</type> input.
</para>
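A few representative inputs, runnable as-is (the referenced table has more):
<programlisting>
SELECT INTERVAL '1-2';               -- SQL standard: 1 year 2 months
SELECT INTERVAL '3 4:05:06';         -- SQL standard: 3 days 4:05:06
SELECT INTERVAL 'P1Y2M3DT4H5M6S';    -- ISO 8601 format
SELECT INTERVAL '2 weeks ago';       -- PostgreSQL traditional syntax
</programlisting>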
@ -2751,7 +2751,7 @@ P <optional> <replaceable>years</replaceable>-<replaceable>months</replaceable>-
<literal>postgres_verbose</literal>, or <literal>iso_8601</literal>,
using the command <literal>SET intervalstyle</literal>.
The default is the <literal>postgres</literal> format.
<xref linkend="interval-style-output-table"> shows examples of each
<xref linkend="interval-style-output-table"/> shows examples of each
output style.
</para>
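For instance, to switch to the ISO 8601 output style for one session:
<programlisting>
SET intervalstyle TO iso_8601;
SELECT INTERVAL '1 day 2 hours 3 minutes';   -- displayed as P1DT2H3M
RESET intervalstyle;                         -- back to the default, postgres
</programlisting>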
@ -2768,7 +2768,7 @@ P <optional> <replaceable>years</replaceable>-<replaceable>months</replaceable>-
<para>
The output of the <literal>postgres</literal> style matches the output of
<productname>PostgreSQL</productname> releases prior to 8.4 when the
<xref linkend="guc-datestyle"> parameter was set to <literal>ISO</literal>.
<xref linkend="guc-datestyle"/> parameter was set to <literal>ISO</literal>.
</para>
<para>
@ -2846,7 +2846,7 @@ P <optional> <replaceable>years</replaceable>-<replaceable>months</replaceable>-
<para>
<productname>PostgreSQL</productname> provides the
standard <acronym>SQL</acronym> type <type>boolean</type>;
see <xref linkend="datatype-boolean-table">.
see <xref linkend="datatype-boolean-table"/>.
The <type>boolean</type> type can have several states:
<quote>true</quote>, <quote>false</quote>, and a third state,
<quote>unknown</quote>, which is represented by the
@ -2902,7 +2902,7 @@ P <optional> <replaceable>years</replaceable>-<replaceable>months</replaceable>-
</para>
<para>
<xref linkend="datatype-boolean-example"> shows that
<xref linkend="datatype-boolean-example"/> shows that
<type>boolean</type> values are output using the letters
<literal>t</literal> and <literal>f</literal>.
</para>
@ -2954,7 +2954,7 @@ SELECT * FROM test1 WHERE a;
<para>
Enum types are created using the <xref
linkend="sql-createtype"> command,
linkend="sql-createtype"/> command,
for example:
<programlisting>
@ -3087,7 +3087,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
<para>
Geometric data types represent two-dimensional spatial
objects. <xref linkend="datatype-geo-table"> shows the geometric
objects. <xref linkend="datatype-geo-table"/> shows the geometric
types available in <productname>PostgreSQL</productname>.
</para>
@ -3158,7 +3158,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
<para>
A rich set of functions and operators is available to perform various geometric
operations such as scaling, translation, rotation, and determining
intersections. They are explained in <xref linkend="functions-geometry">.
intersections. They are explained in <xref linkend="functions-geometry"/>.
</para>
<sect2>
@ -3410,11 +3410,11 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
<para>
<productname>PostgreSQL</productname> offers data types to store IPv4, IPv6, and MAC
addresses, as shown in <xref linkend="datatype-net-types-table">. It
addresses, as shown in <xref linkend="datatype-net-types-table"/>. It
is better to use these types instead of plain text types to store
network addresses, because
these types offer input error checking and specialized
operators and functions (see <xref linkend="functions-net">).
operators and functions (see <xref linkend="functions-net"/>).
</para>
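A short sketch of the input checking and operators mentioned here (the addresses are made up):
<programlisting>
SELECT cidr '192.168.100.128/25';                       -- accepted
SELECT inet '192.168.1.5' &lt;&lt; cidr '192.168.1.0/24'; -- containment test: true
SELECT macaddr '08:00:2b:01:02:0x';                     -- rejected: invalid input
</programlisting>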
<table tocentry="1" id="datatype-net-types-table">
@ -3526,7 +3526,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays
</para>
<para>
<xref linkend="datatype-net-cidr-table"> shows some examples.
<xref linkend="datatype-net-cidr-table"/> shows some examples.
</para>
<table id="datatype-net-cidr-table">
@ -3809,10 +3809,10 @@ SELECT macaddr8_set7bit('08:00:2b:01:02:03');
<para>
Refer to <xref
linkend="sql-syntax-bit-strings"> for information about the syntax
linkend="sql-syntax-bit-strings"/> for information about the syntax
of bit string constants. Bit-logical operators and string
manipulation functions are available; see <xref
linkend="functions-bitstring">.
linkend="functions-bitstring"/>.
</para>
<example>
@ -3840,7 +3840,7 @@ SELECT * FROM test;
A bit string value requires 1 byte for each group of 8 bits, plus
5 or 8 bytes overhead depending on the length of the string
(but long values may be compressed or moved out-of-line, as explained
in <xref linkend="datatype-character"> for character strings).
in <xref linkend="datatype-character"/> for character strings).
</para>
</sect1>
@ -3865,8 +3865,8 @@ SELECT * FROM test;
The <type>tsvector</type> type represents a document in a form optimized
for text search; the <type>tsquery</type> type similarly represents
a text query.
<xref linkend="textsearch"> provides a detailed explanation of this
facility, and <xref linkend="functions-textsearch"> summarizes the
<xref linkend="textsearch"/> provides a detailed explanation of this
facility, and <xref linkend="functions-textsearch"/> summarizes the
related functions and operators.
</para>
@ -3881,7 +3881,7 @@ SELECT * FROM test;
A <type>tsvector</type> value is a sorted list of distinct
<firstterm>lexemes</firstterm>, which are words that have been
<firstterm>normalized</firstterm> to merge different variants of the same word
(see <xref linkend="textsearch"> for details). Sorting and
(see <xref linkend="textsearch"/> for details). Sorting and
duplicate-elimination are done automatically during input, as shown in
this example:
@ -3975,7 +3975,7 @@ SELECT to_tsvector('english', 'The Fat Rats');
'fat':2 'rat':3
</programlisting>
Again, see <xref linkend="textsearch"> for more detail.
Again, see <xref linkend="textsearch"/> for more detail.
</para>
</sect2>
@ -4140,9 +4140,9 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
functions for UUIDs, but the core database does not include any
function for generating UUIDs, because no single algorithm is well
suited for every application. The <xref
linkend="uuid-ossp"> module
linkend="uuid-ossp"/> module
provides functions that implement several standard algorithms.
The <xref linkend="pgcrypto"> module also provides a generation
The <xref linkend="pgcrypto"/> module also provides a generation
function for random UUIDs.
Alternatively, UUIDs could be generated by client applications or
other libraries invoked through a server-side function.
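For example, assuming the pgcrypto extension mentioned above has been installed in the database:
<programlisting>
CREATE EXTENSION IF NOT EXISTS pgcrypto;
SELECT gen_random_uuid();
</programlisting>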
@ -4161,7 +4161,7 @@ a0ee-bc99-9c0b-4ef8-bb6d-6bb9-bd38-0a11
advantage over storing XML data in a <type>text</type> field is that it
checks the input values for well-formedness, and there are support
functions to perform type-safe operations on it; see <xref
linkend="functions-xml">. Use of this data type requires the
linkend="functions-xml"/>. Use of this data type requires the
installation to have been built with <command>configure
--with-libxml</command>.
</para>
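A quick check that the type is usable, assuming the server was built with libxml support as stated above:
<programlisting>
SELECT xmlcomment('hello');                                 -- produces an xml value
SELECT XMLPARSE(CONTENT 'abc&lt;bar&gt;foo&lt;/bar&gt;');   -- checks well-formedness
</programlisting>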
@ -4267,7 +4267,7 @@ SET xmloption TO { DOCUMENT | CONTENT };
results to the client (which is the normal mode), PostgreSQL
converts all character data passed between the client and the
server and vice versa to the character encoding of the respective
end; see <xref linkend="multibyte">. This includes string
end; see <xref linkend="multibyte"/>. This includes string
representations of XML values, such as in the above examples.
This would ordinarily mean that encoding declarations contained in
XML data can become invalid as the character data is converted
@ -4408,7 +4408,7 @@ INSERT INTO mytable VALUES(-1); -- fails
</para>
<para>
For additional information see <xref linkend="sql-createdomain">.
For additional information see <xref linkend="sql-createdomain"/>.
</para>
</sect1>
@ -4473,14 +4473,14 @@ INSERT INTO mytable VALUES(-1); -- fails
<productname>PostgreSQL</productname> as primary keys for various
system tables. OIDs are not added to user-created tables, unless
<literal>WITH OIDS</literal> is specified when the table is
created, or the <xref linkend="guc-default-with-oids">
created, or the <xref linkend="guc-default-with-oids"/>
configuration variable is enabled. Type <type>oid</type> represents
an object identifier. There are also several alias types for
<type>oid</type>: <type>regproc</type>, <type>regprocedure</type>,
<type>regoper</type>, <type>regoperator</type>, <type>regclass</type>,
<type>regtype</type>, <type>regrole</type>, <type>regnamespace</type>,
<type>regconfig</type>, and <type>regdictionary</type>.
<xref linkend="datatype-oid-table"> shows an overview.
<xref linkend="datatype-oid-table"/> shows an overview.
</para>
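The alias types make catalog lookups by name convenient; for example:
<programlisting>
SELECT 'pg_class'::regclass::oid;        -- OID of the pg_class catalog
SELECT 'sum(int4)'::regprocedure;        -- OID of a specific function overload
</programlisting>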
<para>
@ -4677,7 +4677,7 @@ SELECT * FROM pg_attribute
<para>
(The system columns are further explained in <xref
linkend="ddl-system-columns">.)
linkend="ddl-system-columns"/>.)
</para>
</sect1>
@ -4795,7 +4795,7 @@ SELECT * FROM pg_attribute
useful in situations where a function's behavior does not
correspond to simply taking or returning a value of a specific
<acronym>SQL</acronym> data type. <xref
linkend="datatype-pseudotypes-table"> lists the existing
linkend="datatype-pseudotypes-table"/> lists the existing
pseudo-types.
</para>
@ -4818,33 +4818,33 @@ SELECT * FROM pg_attribute
<row>
<entry><type>anyelement</type></entry>
<entry>Indicates that a function accepts any data type
(see <xref linkend="extend-types-polymorphic">).</entry>
(see <xref linkend="extend-types-polymorphic"/>).</entry>
</row>
<row>
<entry><type>anyarray</type></entry>
<entry>Indicates that a function accepts any array data type
(see <xref linkend="extend-types-polymorphic">).</entry>
(see <xref linkend="extend-types-polymorphic"/>).</entry>
</row>
<row>
<entry><type>anynonarray</type></entry>
<entry>Indicates that a function accepts any non-array data type
(see <xref linkend="extend-types-polymorphic">).</entry>
(see <xref linkend="extend-types-polymorphic"/>).</entry>
</row>
<row>
<entry><type>anyenum</type></entry>
<entry>Indicates that a function accepts any enum data type
(see <xref linkend="extend-types-polymorphic"> and
<xref linkend="datatype-enum">).</entry>
(see <xref linkend="extend-types-polymorphic"/> and
<xref linkend="datatype-enum"/>).</entry>
</row>
<row>
<entry><type>anyrange</type></entry>
<entry>Indicates that a function accepts any range data type
(see <xref linkend="extend-types-polymorphic"> and
<xref linkend="rangetypes">).</entry>
(see <xref linkend="extend-types-polymorphic"/> and
<xref linkend="rangetypes"/>).</entry>
</row>
<row>

View File

@ -180,7 +180,7 @@
<title>Date/Time Key Words</title>
<para>
<xref linkend="datetime-month-table"> shows the tokens that are
<xref linkend="datetime-month-table"/> shows the tokens that are
recognized as names of months.
</para>
@ -247,7 +247,7 @@
</table>
<para>
<xref linkend="datetime-dow-table"> shows the tokens that are
<xref linkend="datetime-dow-table"/> shows the tokens that are
recognized as names of days of the week.
</para>
@ -294,7 +294,7 @@
</table>
<para>
<xref linkend="datetime-mod-table"> shows the tokens that serve
<xref linkend="datetime-mod-table"/> shows the tokens that serve
various modifier purposes.
</para>
@ -349,7 +349,7 @@
Since timezone abbreviations are not well standardized,
<productname>PostgreSQL</productname> provides a means to customize
the set of abbreviations accepted by the server. The
<xref linkend="guc-timezone-abbreviations"> run-time parameter
<xref linkend="guc-timezone-abbreviations"/> run-time parameter
determines the active set of abbreviations. While this parameter
can be altered by any database user, the possible values for it
are under the control of the database administrator &mdash; they

View File

@ -14,7 +14,7 @@
</para>
<para>
See also <xref linkend="postgres-fdw">, which provides roughly the same
See also <xref linkend="postgres-fdw"/>, which provides roughly the same
functionality using a more modern and standards-compliant infrastructure.
</para>
@ -58,8 +58,8 @@ dblink_connect(text connname, text connstr) returns text
server. It is recommended to use the foreign-data wrapper
<literal>dblink_fdw</literal> when defining the foreign
server. See the example below, as well as
<xref linkend="sql-createserver"> and
<xref linkend="sql-createusermapping">.
<xref linkend="sql-createserver"/> and
<xref linkend="sql-createusermapping"/>.
</para>
</refsect1>
@ -84,7 +84,7 @@ dblink_connect(text connname, text connstr) returns text
<para><application>libpq</application>-style connection info string, for example
<literal>hostaddr=127.0.0.1 port=5432 dbname=mydb user=postgres
password=mypasswd</literal>.
For details see <xref linkend="libpq-connstring">.
For details see <xref linkend="libpq-connstring"/>.
Alternatively, the name of a foreign server.
</para>
</listitem>
@ -1340,7 +1340,7 @@ dblink_get_notify(text connname) returns setof (notify_name text, be_pid int, ex
the unnamed connection, or on a named connection if specified.
To receive notifications via dblink, <function>LISTEN</function> must
first be issued, using <function>dblink_exec</function>.
For details see <xref linkend="sql-listen"> and <xref linkend="sql-notify">.
For details see <xref linkend="sql-listen"/> and <xref linkend="sql-notify"/>.
</para>
</refsect1>
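Putting the pieces together, a hedged sketch; the connection name, database, and channel are made-up examples:
<programlisting>
SELECT dblink_connect('myconn', 'dbname=postgres');
SELECT dblink_exec('myconn', 'LISTEN my_channel');
SELECT * FROM dblink_get_notify('myconn');
SELECT dblink_disconnect('myconn');
</programlisting>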

View File

@ -39,7 +39,7 @@
SQL does not make any guarantees about the order of the rows in a
table. When a table is read, the rows will appear in an unspecified order,
unless sorting is explicitly requested. This is covered in <xref
linkend="queries">. Furthermore, SQL does not assign unique
linkend="queries"/>. Furthermore, SQL does not assign unique
identifiers to rows, so it is possible to have several completely
identical rows in a table. This is a consequence of the
mathematical model that underlies SQL but is usually not desirable.
@ -64,7 +64,7 @@
built-in data types that fit many applications. Users can also
define their own data types. Most built-in data types have obvious
names and semantics, so we defer a detailed explanation to <xref
linkend="datatype">. Some of the frequently used data types are
linkend="datatype"/>. Some of the frequently used data types are
<type>integer</type> for whole numbers, <type>numeric</type> for
possibly fractional numbers, <type>text</type> for character
strings, <type>date</type> for dates, <type>time</type> for
@ -79,7 +79,7 @@
<para>
To create a table, you use the aptly named <xref
linkend="sql-createtable"> command.
linkend="sql-createtable"/> command.
In this command you specify at least a name for the new table, the
names of the columns and the data type of each column. For
example:
@ -95,7 +95,7 @@ CREATE TABLE my_first_table (
<type>text</type>; the second column has the name
<literal>second_column</literal> and the type <type>integer</type>.
The table and column names follow the identifier syntax explained
in <xref linkend="sql-syntax-identifiers">. The type names are
in <xref linkend="sql-syntax-identifiers"/>. The type names are
usually also identifiers, but there are some exceptions. Note that the
column list is comma-separated and surrounded by parentheses.
</para>
@ -139,7 +139,7 @@ CREATE TABLE products (
<para>
If you no longer need a table, you can remove it using the <xref
linkend="sql-droptable"> command.
linkend="sql-droptable"/> command.
For example:
<programlisting>
DROP TABLE my_first_table;
@ -155,7 +155,7 @@ DROP TABLE products;
<para>
If you need to modify a table that already exists, see <xref
linkend="ddl-alter"> later in this chapter.
linkend="ddl-alter"/> later in this chapter.
</para>
<para>
@ -163,7 +163,7 @@ DROP TABLE products;
tables. The remainder of this chapter is concerned with adding
features to the table definition to ensure data integrity,
security, or convenience. If you are eager to fill your tables with
data now you can skip ahead to <xref linkend="dml"> and read the
data now you can skip ahead to <xref linkend="dml"/> and read the
rest of this chapter later.
</para>
</sect1>
@ -181,7 +181,7 @@ DROP TABLE products;
columns will be filled with their respective default values. A
data manipulation command can also request explicitly that a column
be set to its default value, without having to know what that value is.
(Details about data manipulation commands are in <xref linkend="dml">.)
(Details about data manipulation commands are in <xref linkend="dml"/>.)
</para>
<para>
@ -220,7 +220,7 @@ CREATE TABLE products (
</programlisting>
where the <literal>nextval()</literal> function supplies successive values
from a <firstterm>sequence object</firstterm> (see <xref
linkend="functions-sequence">). This arrangement is sufficiently common
linkend="functions-sequence"/>). This arrangement is sufficiently common
that there's a special shorthand for it:
<programlisting>
CREATE TABLE products (
@ -229,7 +229,7 @@ CREATE TABLE products (
);
</programlisting>
The <literal>SERIAL</literal> shorthand is discussed further in <xref
linkend="datatype-serial">.
linkend="datatype-serial"/>.
</para>
</sect1>
@ -876,9 +876,9 @@ CREATE TABLE order_items (
<para>
More information about updating and deleting data is in <xref
linkend="dml">. Also see the description of foreign key constraint
linkend="dml"/>. Also see the description of foreign key constraint
syntax in the reference documentation for
<xref linkend="sql-createtable">.
<xref linkend="sql-createtable"/>.
</para>
</sect2>
@ -948,10 +948,10 @@ CREATE TABLE circles (
</indexterm>
The object identifier (object ID) of a row. This column is only
present if the table was created using <literal>WITH
OIDS</literal>, or if the <xref linkend="guc-default-with-oids">
OIDS</literal>, or if the <xref linkend="guc-default-with-oids"/>
configuration variable was set at the time. This column is of type
<type>oid</type> (same name as the column); see <xref
linkend="datatype-oid"> for more information about the type.
linkend="datatype-oid"/> for more information about the type.
</para>
</listitem>
</varlistentry>
@ -966,7 +966,7 @@ CREATE TABLE circles (
<para>
The OID of the table containing this row. This column is
particularly handy for queries that select from inheritance
hierarchies (see <xref linkend="ddl-inherit">), since without it,
hierarchies (see <xref linkend="ddl-inherit"/>), since without it,
it's difficult to tell which individual table a row came from. The
<structfield>tableoid</structfield> can be joined against the
<structfield>oid</structfield> column of
@ -1100,7 +1100,7 @@ CREATE TABLE circles (
Transaction identifiers are also 32-bit quantities. In a
long-lived database it is possible for transaction IDs to wrap
around. This is not a fatal problem given appropriate maintenance
procedures; see <xref linkend="maintenance"> for details. It is
procedures; see <xref linkend="maintenance"/> for details. It is
unwise, however, to depend on the uniqueness of transaction IDs
over the long term (more than one billion transactions).
</para>
@ -1167,7 +1167,7 @@ CREATE TABLE circles (
</itemizedlist>
All these actions are performed using the
<xref linkend="sql-altertable">
<xref linkend="sql-altertable"/>
command, whose reference page contains details beyond those given
here.
</para>
@ -1238,7 +1238,7 @@ ALTER TABLE products DROP COLUMN description;
<programlisting>
ALTER TABLE products DROP COLUMN description CASCADE;
</programlisting>
See <xref linkend="ddl-depend"> for a description of the general
See <xref linkend="ddl-depend"/> for a description of the general
mechanism behind this.
</para>
</sect2>
@ -1446,7 +1446,7 @@ ALTER TABLE products RENAME TO items;
object vary depending on the object's type (table, function, etc).
For complete information on the different types of privileges
supported by <productname>PostgreSQL</productname>, refer to the
<xref linkend="sql-grant"> reference
<xref linkend="sql-grant"/> reference
page. The following sections and chapters will also show you how
those privileges are used.
</para>
@ -1459,7 +1459,7 @@ ALTER TABLE products RENAME TO items;
<para>
An object can be assigned to a new owner with an <command>ALTER</command>
command of the appropriate kind for the object, e.g. <xref
linkend="sql-altertable">. Superusers can always do
linkend="sql-altertable"/>. Superusers can always do
this; ordinary roles can only do it if they are both the current owner
of the object (or a member of the owning role) and a member of the new
owning role.
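For example, reusing the accounts table and role joe from the privilege examples below:
<programlisting>
ALTER TABLE accounts OWNER TO joe;
</programlisting>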
@ -1482,7 +1482,7 @@ GRANT UPDATE ON accounts TO joe;
be used to grant a privilege to every role on the system. Also,
<quote>group</quote> roles can be set up to help manage privileges when
there are many users of a database &mdash; for details see
<xref linkend="user-manag">.
<xref linkend="user-manag"/>.
</para>
<para>
@ -1506,8 +1506,8 @@ REVOKE ALL ON accounts FROM PUBLIC;
the right to grant it in turn to others. If the grant option is
subsequently revoked then all who received the privilege from that
recipient (directly or through a chain of grants) will lose the
privilege. For details see the <xref linkend="sql-grant"> and
<xref linkend="sql-revoke"> reference pages.
privilege. For details see the <xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/> reference pages.
</para>
</sect1>
@ -1524,7 +1524,7 @@ REVOKE ALL ON accounts FROM PUBLIC;
<para>
In addition to the SQL-standard <link linkend="ddl-priv">privilege
system</link> available through <xref linkend="sql-grant">,
system</link> available through <xref linkend="sql-grant"/>,
tables can have <firstterm>row security policies</firstterm> that restrict,
on a per-user basis, which rows can be returned by normal queries
or inserted, updated, or deleted by data modification commands.
@ -1584,11 +1584,11 @@ REVOKE ALL ON accounts FROM PUBLIC;
</para>
<para>
Policies are created using the <xref linkend="sql-createpolicy">
command, altered using the <xref linkend="sql-alterpolicy"> command,
and dropped using the <xref linkend="sql-droppolicy"> command. To
Policies are created using the <xref linkend="sql-createpolicy"/>
command, altered using the <xref linkend="sql-alterpolicy"/> command,
and dropped using the <xref linkend="sql-droppolicy"/> command. To
enable and disable row security for a given table, use the
<xref linkend="sql-altertable"> command.
<xref linkend="sql-altertable"/> command.
</para>
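A minimal sketch, assuming an accounts table with an owner column that stores a role name (the column name is illustrative only):
<programlisting>
ALTER TABLE accounts ENABLE ROW LEVEL SECURITY;
CREATE POLICY account_owner_policy ON accounts
    USING (owner = current_user);
DROP POLICY account_owner_policy ON accounts;    -- and remove it again
</programlisting>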
<para>
@ -1829,7 +1829,7 @@ UPDATE 0
not being applied. For example, when taking a backup, it could be
disastrous if row security silently caused some rows to be omitted
from the backup. In such a situation, you can set the
<xref linkend="guc-row-security"> configuration parameter
<xref linkend="guc-row-security"/> configuration parameter
to <literal>off</literal>. This does not in itself bypass row security;
what it does is throw an error if any query's results would get filtered
by a policy. The reason for the error can then be investigated and
@ -1951,8 +1951,8 @@ SELECT * FROM information WHERE group_id = 2 FOR UPDATE;
</para>
<para>
For additional details see <xref linkend="sql-createpolicy">
and <xref linkend="sql-altertable">.
For additional details see <xref linkend="sql-createpolicy"/>
and <xref linkend="sql-altertable"/>.
</para>
</sect1>
@ -2034,7 +2034,7 @@ SELECT * FROM information WHERE group_id = 2 FOR UPDATE;
</indexterm>
<para>
To create a schema, use the <xref linkend="sql-createschema">
To create a schema, use the <xref linkend="sql-createschema"/>
command. Give the schema a name
of your choice. For example:
<programlisting>
@ -2099,7 +2099,7 @@ DROP SCHEMA myschema;
<programlisting>
DROP SCHEMA myschema CASCADE;
</programlisting>
See <xref linkend="ddl-depend"> for a description of the general
See <xref linkend="ddl-depend"/> for a description of the general
mechanism behind this.
</para>
@ -2112,7 +2112,7 @@ CREATE SCHEMA <replaceable>schema_name</replaceable> AUTHORIZATION <replaceable>
</programlisting>
You can even omit the schema name, in which case the schema name
will be the same as the user name. See <xref
linkend="ddl-schemas-patterns"> for how this can be useful.
linkend="ddl-schemas-patterns"/> for how this can be useful.
</para>
<para>
@ -2242,7 +2242,7 @@ SET search_path TO myschema;
</para>
<para>
See also <xref linkend="functions-info"> for other ways to manipulate
See also <xref linkend="functions-info"/> for other ways to manipulate
the schema search path.
</para>
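For example, to inspect and adjust the path from SQL (myschema is the schema created above):
<programlisting>
SHOW search_path;
SELECT current_schemas(true);        -- includes implicitly searched schemas
SET search_path TO myschema, public;
</programlisting>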
@ -2297,7 +2297,7 @@ REVOKE CREATE ON SCHEMA public FROM PUBLIC;
<quote>public</quote> means <quote>every user</quote>. In the
first sense it is an identifier, in the second sense it is a
key word, hence the different capitalization; recall the
guidelines from <xref linkend="sql-syntax-identifiers">.)
guidelines from <xref linkend="sql-syntax-identifiers"/>.)
</para>
</sect2>
@ -2483,7 +2483,7 @@ SELECT name, altitude
</programlisting>
Given the sample data from the <productname>PostgreSQL</productname>
tutorial (see <xref linkend="tutorial-sql-intro">), this returns:
tutorial (see <xref linkend="tutorial-sql-intro"/>), this returns:
<programlisting>
name | altitude
@ -2602,7 +2602,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
<structname>capitals</structname> table, but this does not happen:
<command>INSERT</command> always inserts into exactly the table
specified. In some cases it is possible to redirect the insertion
using a rule (see <xref linkend="rules">). However that does not
using a rule (see <xref linkend="rules"/>). However that does not
help for the above case because the <structname>cities</structname> table
does not contain the column <structfield>state</structfield>, and so the
command will be rejected before the rule can be applied.
@ -2633,11 +2633,11 @@ VALUES ('Albany', NULL, NULL, 'NY');
<para>
Table inheritance is typically established when the child table is
created, using the <literal>INHERITS</literal> clause of the
<xref linkend="sql-createtable">
<xref linkend="sql-createtable"/>
statement.
Alternatively, a table which is already defined in a compatible way can
have a new parent relationship added, using the <literal>INHERIT</literal>
variant of <xref linkend="sql-altertable">.
variant of <xref linkend="sql-altertable"/>.
To do this the new child table must already include columns with
the same names and types as the columns of the parent. It must also include
check constraints with the same names and check expressions as those of the
@ -2645,7 +2645,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
<literal>NO INHERIT</literal> variant of <command>ALTER TABLE</command>.
Dynamically adding and removing inheritance links like this can be useful
when the inheritance relationship is being used for table
partitioning (see <xref linkend="ddl-partitioning">).
partitioning (see <xref linkend="ddl-partitioning"/>).
</para>
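A compact sketch using the cities/capitals example from this chapter (column lists abbreviated):
<programlisting>
CREATE TABLE cities (name text, population real, altitude int);
CREATE TABLE capitals (state char(2)) INHERITS (cities);
ALTER TABLE capitals NO INHERIT cities;   -- detach from the parent
ALTER TABLE capitals INHERIT cities;      -- and re-attach; the columns already match
</programlisting>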
<para>
@ -2665,11 +2665,11 @@ VALUES ('Albany', NULL, NULL, 'NY');
if they are inherited
from any parent tables. If you wish to remove a table and all of its
descendants, one easy way is to drop the parent table with the
<literal>CASCADE</literal> option (see <xref linkend="ddl-depend">).
<literal>CASCADE</literal> option (see <xref linkend="ddl-depend"/>).
</para>
<para>
<xref linkend="sql-altertable"> will
<xref linkend="sql-altertable"/> will
propagate any changes in column data definitions and check
constraints down the inheritance hierarchy. Again, dropping
columns that are depended on by other tables is only possible when using
@ -2687,7 +2687,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
that the data is (also) in the parent table. But
the <structname>capitals</structname> table could not be updated directly
without an additional grant. In a similar way, the parent table's row
security policies (see <xref linkend="ddl-rowsecurity">) are applied to
security policies (see <xref linkend="ddl-rowsecurity"/>) are applied to
rows coming from child tables during an inherited query. A child table's
policies, if any, are applied only when it is the table explicitly named
in the query; and in that case, any policies attached to its parent(s) are
@ -2695,7 +2695,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
</para>
<para>
Foreign tables (see <xref linkend="ddl-foreign-data">) can also
Foreign tables (see <xref linkend="ddl-foreign-data"/>) can also
be part of inheritance hierarchies, either as parent or child
tables, just as regular tables can be. If a foreign table is part
of an inheritance hierarchy then any operations not supported by
@ -2719,7 +2719,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
typically only work on individual, physical tables and do not
support recursing over inheritance hierarchies. The respective
behavior of each individual command is documented in its reference
page (<xref linkend="sql-commands">).
page (<xref linkend="sql-commands"/>).
</para>
<para>
@ -2923,7 +2923,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
called <firstterm>sub-partitioning</firstterm>. Partitions may have their
own indexes, constraints and default values, distinct from those of other
partitions. Indexes must be created separately for each partition. See
<xref linkend="sql-createtable"> for more details on creating partitioned
<xref linkend="sql-createtable"/> for more details on creating partitioned
tables and partitions.
</para>
@ -2932,7 +2932,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
vice versa. However, it is possible to add a regular or partitioned table
containing data as a partition of a partitioned table, or remove a
partition from a partitioned table turning it into a standalone table;
see <xref linkend="sql-altertable"> to learn more about the
see <xref linkend="sql-altertable"/> to learn more about the
<command>ATTACH PARTITION</command> and <command>DETACH PARTITION</command>
sub-commands.
</para>
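For instance, with the range-partitioned measurement table used later in this section (the bounds are illustrative):
<programlisting>
ALTER TABLE measurement
    ATTACH PARTITION measurement_y2008m02
    FOR VALUES FROM ('2008-02-01') TO ('2008-03-01');

ALTER TABLE measurement DETACH PARTITION measurement_y2008m02;
</programlisting>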
@ -2948,7 +2948,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
inheritance with regular tables. Since a partition hierarchy consisting
of the partitioned table and its partitions is still an inheritance
hierarchy, all the normal rules of inheritance apply as described in
<xref linkend="ddl-inherit"> with some exceptions, most notably:
<xref linkend="ddl-inherit"/> with some exceptions, most notably:
<itemizedlist>
<listitem>
@ -2999,7 +2999,7 @@ VALUES ('Albany', NULL, NULL, 'NY');
<para>
Partitions can also be foreign tables
(see <xref linkend="sql-createforeigntable">),
(see <xref linkend="sql-createforeigntable"/>),
although these have some limitations that normal tables do not. For
example, data inserted into the partitioned table is not routed to
foreign table partitions.
@ -3158,7 +3158,7 @@ CREATE INDEX ON measurement_y2008m01 (logdate);
<listitem>
<para>
Ensure that the <xref linkend="guc-constraint-exclusion">
Ensure that the <xref linkend="guc-constraint-exclusion"/>
configuration parameter is not disabled in <filename>postgresql.conf</filename>.
If it is, queries will not be optimized as desired.
</para>
@ -3595,7 +3595,7 @@ DO INSTEAD
<listitem>
<para>
Ensure that the <xref linkend="guc-constraint-exclusion">
Ensure that the <xref linkend="guc-constraint-exclusion"/>
configuration parameter is not disabled in
<filename>postgresql.conf</filename>.
If it is, queries will not be optimized as desired.
@ -3806,7 +3806,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<para>
The default (and recommended) setting of
<xref linkend="guc-constraint-exclusion"> is actually neither
<xref linkend="guc-constraint-exclusion"/> is actually neither
<literal>on</literal> nor <literal>off</literal>, but an intermediate setting
called <literal>partition</literal>, which causes the technique to be
applied only to queries that are likely to be working on partitioned
@ -3889,10 +3889,10 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
library that can communicate with an external data source, hiding the
details of connecting to the data source and obtaining data from it.
There are some foreign data wrappers available as <filename>contrib</filename>
modules; see <xref linkend="contrib">. Other kinds of foreign data
modules; see <xref linkend="contrib"/>. Other kinds of foreign data
wrappers might be found as third party products. If none of the existing
foreign data wrappers suit your needs, you can write your own; see <xref
linkend="fdwhandler">.
linkend="fdwhandler"/>.
</para>
<para>
@ -3918,11 +3918,11 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<para>
For additional information, see
<xref linkend="sql-createforeigndatawrapper">,
<xref linkend="sql-createserver">,
<xref linkend="sql-createusermapping">,
<xref linkend="sql-createforeigntable">, and
<xref linkend="sql-importforeignschema">.
<xref linkend="sql-createforeigndatawrapper"/>,
<xref linkend="sql-createserver"/>,
<xref linkend="sql-createusermapping"/>,
<xref linkend="sql-createforeigntable"/>, and
<xref linkend="sql-importforeignschema"/>.
</para>
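A hedged end-to-end sketch using postgres_fdw; the server name, host, and credentials are placeholders:
<programlisting>
CREATE EXTENSION postgres_fdw;
CREATE SERVER remote_pg FOREIGN DATA WRAPPER postgres_fdw
    OPTIONS (host 'remote.example.com', dbname 'otherdb');
CREATE USER MAPPING FOR CURRENT_USER SERVER remote_pg
    OPTIONS (user 'remote_user', password 'secret');
IMPORT FOREIGN SCHEMA public FROM SERVER remote_pg INTO myschema;  -- myschema must already exist
</programlisting>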
</sect1>
@ -3966,7 +3966,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<para>
Detailed information on
these topics appears in <xref linkend="server-programming">.
these topics appears in <xref linkend="server-programming"/>.
</para>
</sect1>
@ -3996,7 +3996,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
<productname>PostgreSQL</productname> makes sure that you cannot
drop objects that other objects still depend on. For example,
attempting to drop the products table we considered in <xref
linkend="ddl-constraints-fk">, with the orders table depending on
linkend="ddl-constraints-fk"/>, with the orders table depending on
it, would result in an error message like this:
<screen>
DROP TABLE products;
@ -4066,7 +4066,7 @@ CREATE FUNCTION get_color_note (rainbow) RETURNS text AS
LANGUAGE SQL;
</programlisting>
(See <xref linkend="xfunc-sql"> for an explanation of SQL-language
(See <xref linkend="xfunc-sql"/> for an explanation of SQL-language
functions.) <productname>PostgreSQL</productname> will be aware that
the <function>get_color_note</function> function depends on the <type>rainbow</type>
type: dropping the type would force dropping the function, because its

View File

@ -226,7 +226,7 @@ gcc -G -o foo.so foo.o
</para>
<para>
Refer back to <xref linkend="xfunc-c-dynload"> about where the
Refer back to <xref linkend="xfunc-c-dynload"/> about where the
server expects to find the shared library files.
</para>

View File

@ -71,7 +71,7 @@ mydb# select ts_lexize('intdict', '12345678');
</programlisting>
but real-world usage will involve including it in a text search
configuration as described in <xref linkend="textsearch">.
configuration as described in <xref linkend="textsearch"/>.
That might look like this:
<programlisting>

View File

@ -135,7 +135,7 @@ mydb=# SELECT ts_lexize('xsyn', 'syn1');
</programlisting>
Real-world usage will involve including it in a text search
configuration as described in <xref linkend="textsearch">.
configuration as described in <xref linkend="textsearch"/>.
That might look like this:
<programlisting>

View File

@ -20,18 +20,18 @@
stored. If the table has any columns with potentially-wide values,
there also might be a <acronym>TOAST</acronym> file associated with the table,
which is used to store values too wide to fit comfortably in the main
table (see <xref linkend="storage-toast">). There will be one valid index
table (see <xref linkend="storage-toast"/>). There will be one valid index
on the <acronym>TOAST</acronym> table, if present. There also might be indexes
associated with the base table. Each table and index is stored in a
separate disk file &mdash; possibly more than one file, if the file would
exceed one gigabyte. Naming conventions for these files are described
in <xref linkend="storage-file-layout">.
in <xref linkend="storage-file-layout"/>.
</para>
<para>
You can monitor disk space in three ways:
using the SQL functions listed in <xref linkend="functions-admin-dbsize">,
using the <xref linkend="oid2name"> module, or
using the SQL functions listed in <xref linkend="functions-admin-dbsize"/>,
using the <xref linkend="oid2name"/> module, or
using manual inspection of the system catalogs.
The SQL functions are the easiest to use and are generally recommended.
The remainder of this section shows how to do it by inspection of the
@ -124,7 +124,7 @@ ORDER BY relpages DESC;
If you cannot free up additional space on the disk by deleting
other things, you can move some of the database files to other file
systems by making use of tablespaces. See <xref
linkend="manage-ag-tablespaces"> for more information about that.
linkend="manage-ag-tablespaces"/> for more information about that.
</para>
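The SQL functions mentioned above are usually all you need; for example (replace mytable with any table name):
<programlisting>
SELECT pg_size_pretty(pg_database_size(current_database()));
SELECT pg_size_pretty(pg_total_relation_size('mytable'));   -- table plus indexes and TOAST
</programlisting>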
<tip>

View File

@ -33,10 +33,10 @@
</para>
<para>
To create a new row, use the <xref linkend="sql-insert">
To create a new row, use the <xref linkend="sql-insert"/>
command. The command requires the
table name and column values. For
example, consider the products table from <xref linkend="ddl">:
example, consider the products table from <xref linkend="ddl"/>:
<programlisting>
CREATE TABLE products (
product_no integer,
@ -107,16 +107,16 @@ INSERT INTO products (product_no, name, price)
WHERE release_date = 'today';
</programlisting>
This provides the full power of the SQL query mechanism (<xref
linkend="queries">) for computing the rows to be inserted.
linkend="queries"/>) for computing the rows to be inserted.
</para>
<tip>
<para>
When inserting a lot of data at the same time, consider using
the <xref linkend="sql-copy"> command.
It is not as flexible as the <xref linkend="sql-insert">
the <xref linkend="sql-copy"/> command.
It is not as flexible as the <xref linkend="sql-insert"/>
command, but is more efficient. Refer
to <xref linkend="populate"> for more information on improving
to <xref linkend="populate"/> for more information on improving
bulk loading performance.
</para>
</tip>
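For example, a bulk load with COPY might look like this; the file path is a placeholder, the server process must be able to read it, and psql's \copy reads a client-side file instead:
<programlisting>
COPY products (product_no, name, price)
    FROM '/tmp/products.csv' WITH (FORMAT csv);
</programlisting>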
@ -141,7 +141,7 @@ INSERT INTO products (product_no, name, price)
</para>
<para>
To update existing rows, use the <xref linkend="sql-update">
To update existing rows, use the <xref linkend="sql-update"/>
command. This requires
three pieces of information:
<orderedlist spacing="compact">
@ -160,7 +160,7 @@ INSERT INTO products (product_no, name, price)
</para>
<para>
Recall from <xref linkend="ddl"> that SQL does not, in general,
Recall from <xref linkend="ddl"/> that SQL does not, in general,
provide a unique identifier for rows. Therefore it is not
always possible to directly specify which row to update.
Instead, you specify which conditions a row must meet in order to
@ -203,7 +203,7 @@ UPDATE products SET price = price * 1.10;
this does not create any ambiguity. Of course, the
<literal>WHERE</literal> condition does
not have to be an equality test. Many other operators are
available (see <xref linkend="functions">). But the expression
available (see <xref linkend="functions"/>). But the expression
needs to evaluate to a Boolean result.
</para>
@ -243,7 +243,7 @@ UPDATE mytable SET a = 5, b = 3, c = 1 WHERE a &gt; 0;
</para>
<para>
You use the <xref linkend="sql-delete">
You use the <xref linkend="sql-delete"/>
command to remove rows; the syntax is very similar to the
<command>UPDATE</command> command. For instance, to remove all
rows from the products table that have a price of 10, use:
@ -296,7 +296,7 @@ DELETE FROM products;
<para>
The allowed contents of a <literal>RETURNING</literal> clause are the same as
a <command>SELECT</command> command's output list
(see <xref linkend="queries-select-lists">). It can contain column
(see <xref linkend="queries-select-lists"/>). It can contain column
names of the command's target table, or value expressions using those
columns. A common shorthand is <literal>RETURNING *</literal>, which selects
all columns of the target table in order.
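For example, reflecting the price update shown earlier in this chapter:
<programlisting>
UPDATE products SET price = price * 1.10
    WHERE price &lt;= 99.99
    RETURNING name, price AS new_price;
</programlisting>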
@ -340,7 +340,7 @@ DELETE FROM products
</para>
<para>
If there are triggers (<xref linkend="triggers">) on the target table,
If there are triggers (<xref linkend="triggers"/>) on the target table,
the data available to <literal>RETURNING</literal> is the row as modified by
the triggers. Thus, inspecting columns computed by triggers is another
common use-case for <literal>RETURNING</literal>.

View File

@ -47,23 +47,11 @@
<para>
The documentation sources are written in
<firstterm>DocBook</firstterm>, which is a markup language
superficially similar to <acronym>HTML</acronym>. Both of these
languages are applications of the <firstterm>Standard Generalized
Markup Language</firstterm>, <acronym>SGML</acronym>, which is
essentially a language for describing other languages. In what
follows, the terms DocBook and <acronym>SGML</acronym> are both
defined in <acronym>XML</acronym>. In what
follows, the terms DocBook and <acronym>XML</acronym> are both
used, but technically they are not interchangeable.
</para>
<note>
<para>
The PostgreSQL documentation is currently being transitioned from DocBook
SGML and DSSSL style sheets to DocBook XML and XSLT style sheets. Be
careful to look at the instructions relating to the PostgreSQL version you
are dealing with, as the procedures and required tools will change.
</para>
</note>
<para>
<productname>DocBook</productname> allows an author to specify the
structure and content of a technical document without worrying
@ -97,19 +85,8 @@
<para>
This is the definition of DocBook itself. We currently use version
4.2; you cannot use later or earlier versions. You need
the <acronym>SGML</acronym> and the <acronym>XML</acronym> variant of
the DocBook DTD of the same version. These will usually be in separate
packages.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><ulink url="http://www.oasis-open.org/cover/ISOEnts.zip">ISO 8879 character entities</ulink></term>
<listitem>
<para>
These are required by DocBook SGML but are distributed separately
because they are maintained by ISO.
the <acronym>XML</acronym> variant of the DocBook DTD, not
the <acronym>SGML</acronym> variant.
</para>
</listitem>
</varlistentry>
@ -130,17 +107,6 @@
</listitem>
</varlistentry>
<varlistentry>
<term><ulink url="http://openjade.sourceforge.net">OpenSP</ulink></term>
<listitem>
<para>
This is the base package of <acronym>SGML</acronym> processing. Note
that we no longer need OpenJade, the <acronym>DSSSL</acronym>
processor, only the OpenSP package for converting SGML to XML.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><ulink url="http://xmlsoft.org/">Libxml2</ulink> for <command>xmllint</command></term>
<listitem>
@ -201,7 +167,7 @@
<para>
To install the required packages, use:
<programlisting>
yum install docbook-dtds docbook-style-xsl fop libxslt opensp
yum install docbook-dtds docbook-style-xsl fop libxslt
</programlisting>
</para>
</sect2>
@ -209,41 +175,10 @@ yum install docbook-dtds docbook-style-xsl fop libxslt opensp
<sect2>
<title>Installation on FreeBSD</title>
<para>
The FreeBSD Documentation Project is itself a heavy user of
DocBook, so it comes as no surprise that there is a full set of
<quote>ports</quote> of the documentation tools available on
FreeBSD. The following ports need to be installed to build the
documentation on FreeBSD.
<itemizedlist>
<listitem>
<para><filename>textproc/docbook-sgml</filename></para>
</listitem>
<listitem>
<para><filename>textproc/docbook-xml</filename></para>
</listitem>
<listitem>
<para><filename>textproc/docbook-xsl</filename></para>
</listitem>
<listitem>
<para><filename>textproc/dsssl-docbook-modular</filename></para>
</listitem>
<listitem>
<para><filename>textproc/libxslt</filename></para>
</listitem>
<listitem>
<para><filename>textproc/fop</filename></para>
</listitem>
<listitem>
<para><filename>textproc/opensp</filename></para>
</listitem>
</itemizedlist>
</para>
<para>
To install the required packages with <command>pkg</command>, use:
<programlisting>
pkg install docbook-sgml docbook-xml docbook-xsl fop libxslt opensp
pkg install docbook-xml docbook-xsl fop libxslt
</programlisting>
</para>
@ -268,7 +203,7 @@ pkg install docbook-sgml docbook-xml docbook-xsl fop libxslt opensp
available for <productname>Debian GNU/Linux</productname>.
To install, simply use:
<programlisting>
apt-get install docbook docbook-xml docbook-xsl fop libxml2-utils opensp xsltproc
apt-get install docbook-xml docbook-xsl fop libxml2-utils xsltproc
</programlisting>
</para>
</sect2>
@ -277,117 +212,21 @@ apt-get install docbook docbook-xml docbook-xsl fop libxml2-utils opensp xsltpro
<title>macOS</title>
<para>
If you use MacPorts, the following will get you set up:
<programlisting>
sudo port install docbook-sgml-4.2 docbook-xml-4.2 docbook-xsl fop libxslt opensp
</programlisting>
On macOS, you can build the HTML and man documentation without installing
anything extra. If you want to build PDFs or want to install a local copy
of DocBook, you can get those from your preferred package manager.
</para>
</sect2>
<sect2>
<title>Manual Installation from Source</title>
<para>
The manual installation process of the DocBook tools is somewhat
complex, so if you have pre-built packages available, use them.
We describe here only a standard setup, with reasonably standard
installation paths, and no <quote>fancy</quote> features. For
details, you should study the documentation of the respective
package, and read <acronym>SGML</acronym> introductory material.
If you use MacPorts, the following will get you set up:
<programlisting>
sudo port install docbook-xml-4.2 docbook-xsl fop
</programlisting>
If you use Homebrew, use this:
<programlisting>
brew install docbook docbook-xsl fop
</programlisting>
</para>
<sect3>
<title>Installing OpenSP</title>
<para>
The installation of OpenSP offers a GNU-style
<literal>./configure; make; make install</literal> build process.
Details can be found in the OpenSP source distribution. In a nutshell:
<synopsis>
./configure --enable-default-catalog=/usr/local/etc/sgml/catalog
make
make install
</synopsis>
Be sure to remember where you put the <quote>default catalog</quote>; you
will need it below. You can also leave it off, but then you will have to
set the environment variable <envar>SGML_CATALOG_FILES</envar> to point
to the file whenever you use any programs from OpenSP later on. (This
method is also an option if OpenSP is already installed and you want to
install the rest of the toolchain locally.)
</para>
</sect3>
<sect3>
<title>Installing the <productname>DocBook</productname> <acronym>DTD</acronym> Kit</title>
<procedure>
<step>
<para>
Obtain the <ulink url="http://www.docbook.org/sgml/4.2/docbook-4.2.zip">
DocBook V4.2 distribution</ulink>.
</para>
</step>
<step>
<para>
Create the directory
<filename>/usr/local/share/sgml/docbook-4.2</filename> and change
to it. (The exact location is irrelevant, but this one is
reasonable within the layout we are following here.)
<screen>
<prompt>$ </prompt><userinput>mkdir /usr/local/share/sgml/docbook-4.2</userinput>
<prompt>$ </prompt><userinput>cd /usr/local/share/sgml/docbook-4.2</userinput>
</screen>
</para>
</step>
<step>
<para>
Unpack the archive:
<screen>
<prompt>$ </prompt><userinput>unzip -a ...../docbook-4.2.zip</userinput>
</screen>
(The archive will unpack its files into the current directory.)
</para>
</step>
<step>
<para>
Edit the file
<filename>/usr/local/share/sgml/catalog</filename> (or whatever
you told jade during installation) and put a line like this
into it:
<programlisting>
CATALOG "docbook-4.2/docbook.cat"
</programlisting>
</para>
</step>
<step>
<para>
Download the <ulink url="http://www.oasis-open.org/cover/ISOEnts.zip">
ISO 8879 character entities archive</ulink>, unpack it, and put the
files in the same directory you put the DocBook files in:
<screen>
<prompt>$ </prompt><userinput>cd /usr/local/share/sgml/docbook-4.2</userinput>
<prompt>$ </prompt><userinput>unzip ...../ISOEnts.zip</userinput>
</screen>
</para>
</step>
<step>
<para>
Run the following command in the directory with the DocBook and ISO files:
<programlisting>
perl -pi -e 's/iso-(.*).gml/ISO\1/g' docbook.cat
</programlisting>
(This fixes a mixup between the names used in the DocBook
catalog file and the actual names of the ISO character entity
files.)
</para>
</step>
</procedure>
</sect3>
</sect2>
<sect2 id="docguide-toolsets-configure">
@ -400,26 +239,14 @@ perl -pi -e 's/iso-(.*).gml/ISO\1/g' docbook.cat
Check the output near the end of the run; it should look something
like this:
<screen>
<computeroutput>
checking for onsgmls... onsgmls
checking for DocBook V4.2... yes
checking for dbtoepub... dbtoepub
checking for xmllint... xmllint
checking for DocBook XML V4.2... yes
checking for dbtoepub... dbtoepub
checking for xsltproc... xsltproc
checking for osx... osx
checking for fop... fop
</computeroutput>
</screen>
If neither <filename>onsgmls</filename> nor
<filename>nsgmls</filename> were found then some of the following tests
will be skipped. <filename>nsgmls</filename> is part of the OpenSP
package. You can pass the environment variable
<envar>NSGMLS</envar> to configure to point
to the programs if they are not found automatically. If
<quote>DocBook V4.2</quote> was not found then you did not install
the DocBook DTD kit in a place where OpenSP can find it, or you have
not set up the catalog files correctly. See the installation hints
above.
If <filename>xmllint</filename> was not found then some of the following
tests will be skipped.
</para>
</sect2>
@ -464,9 +291,7 @@ checking for fop... fop
We use the DocBook XSL stylesheets to
convert <productname>DocBook</productname>
<sgmltag>refentry</sgmltag> pages to *roff output suitable for man
pages. The man pages are also distributed as a tar archive,
similar to the <acronym>HTML</acronym> version. To create the man
pages, use the commands:
pages. To create the man pages, use the command:
<screen>
<prompt>doc/src/sgml$ </prompt><userinput>make man</userinput>
</screen>
@ -536,7 +361,7 @@ ADDITIONAL_FLAGS='-Xmx1000m'
The installation instructions are also distributed as plain text,
in case they are needed in a situation where better reading tools
are not available. The <filename>INSTALL</filename> file
corresponds to <xref linkend="installation">, with some minor
corresponds to <xref linkend="installation"/>, with some minor
changes to account for the different context. To recreate the
file, change to the directory <filename>doc/src/sgml</filename>
and enter <userinput>make INSTALL</userinput>.

View File

@ -56,7 +56,7 @@
<para>
The provided functions are shown
in <xref linkend="earthdistance-cube-functions">.
in <xref linkend="earthdistance-cube-functions"/>.
</para>
<table id="earthdistance-cube-functions">
@ -150,7 +150,7 @@
<para>
A single operator is provided, shown
in <xref linkend="earthdistance-point-operators">.
in <xref linkend="earthdistance-point-operators"/>.
</para>
<table id="earthdistance-point-operators">

View File

@ -31,7 +31,7 @@
specially marked sections. To build the program, the source code (<filename>*.pgc</filename>)
is first passed through the embedded SQL preprocessor, which converts it
to an ordinary C program (<filename>*.c</filename>), and afterwards it can be processed by a C
compiler. (For details about the compiling and linking see <xref linkend="ecpg-process">).
compiler. (For details about the compiling and linking see <xref linkend="ecpg-process"/>).
Converted ECPG applications call functions in the libpq library
through the embedded SQL library (ecpglib), and communicate with
the PostgreSQL server using the normal frontend-backend protocol.
@ -397,9 +397,9 @@ EXEC SQL COMMIT;
row can also be executed using
<literal>EXEC SQL</literal> directly. To handle result sets with
multiple rows, an application has to use a cursor;
see <xref linkend="ecpg-cursors"> below. (As a special case, an
see <xref linkend="ecpg-cursors"/> below. (As a special case, an
application can fetch multiple rows at once into an array host
variable; see <xref linkend="ecpg-variables-arrays">.)
variable; see <xref linkend="ecpg-variables-arrays"/>.)
</para>
<para>
@ -422,7 +422,7 @@ EXEC SQL SHOW search_path INTO :var;
<literal>:<replaceable>something</replaceable></literal> are
<firstterm>host variables</firstterm>, that is, they refer to
variables in the C program. They are explained in <xref
linkend="ecpg-variables">.
linkend="ecpg-variables"/>.
</para>
</sect2>
@ -452,8 +452,8 @@ EXEC SQL COMMIT;
<para>
For more details about declaration of the cursor,
see <xref linkend="ecpg-sql-declare">, and
see <xref linkend="sql-fetch"> for <literal>FETCH</literal> command
see <xref linkend="ecpg-sql-declare"/>, and
see <xref linkend="sql-fetch"/> for <literal>FETCH</literal> command
details.
</para>
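Not part of the patch, but for orientation while reading these retagged cursor references: a minimal ECPG (.pgc) sketch of declaring, opening, and fetching from a cursor. The table and column names (mytable, id, name) are hypothetical.

    #include <stdio.h>

    EXEC SQL BEGIN DECLARE SECTION;
    int  id;
    char name[64];
    EXEC SQL END DECLARE SECTION;

    /* assumes an already-open default connection */
    EXEC SQL DECLARE cur CURSOR FOR SELECT id, name FROM mytable;
    EXEC SQL OPEN cur;

    EXEC SQL WHENEVER NOT FOUND DO BREAK;
    for (;;)
    {
        EXEC SQL FETCH NEXT FROM cur INTO :id, :name;
        printf("%d: %s\n", id, name);
    }

    EXEC SQL CLOSE cur;
    EXEC SQL COMMIT;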
@ -477,7 +477,7 @@ EXEC SQL COMMIT;
interface also supports autocommit of transactions (similar to
<application>psql</application>'s default behavior) via the <option>-t</option>
command-line option to <command>ecpg</command> (see <xref
linkend="app-ecpg">) or via the <literal>EXEC SQL SET AUTOCOMMIT TO
linkend="app-ecpg"/>) or via the <literal>EXEC SQL SET AUTOCOMMIT TO
ON</literal> statement. In autocommit mode, each command is
automatically committed unless it is inside an explicit transaction
block. This mode can be explicitly turned off using <literal>EXEC
@ -617,8 +617,8 @@ EXEC SQL DEALLOCATE PREPARE <replaceable>name</replaceable>;
<para>
For more details about <literal>PREPARE</literal>,
see <xref linkend="ecpg-sql-prepare">. Also
see <xref linkend="ecpg-dynamic"> for more details about using
see <xref linkend="ecpg-sql-prepare"/>. Also
see <xref linkend="ecpg-dynamic"/> for more details about using
placeholders and input parameters.
</para>
</sect2>
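Again not from the patch: a hedged sketch of the prepare/execute/deallocate cycle that this hunk's cross-references point to. The statement text, table, and parameter value are invented.

    EXEC SQL BEGIN DECLARE SECTION;
    const char *stmt = "SELECT count(*) FROM mytable WHERE id > ?";  /* hypothetical table */
    int  threshold = 10;
    long cnt;
    EXEC SQL END DECLARE SECTION;

    EXEC SQL PREPARE mystmt FROM :stmt;
    EXEC SQL EXECUTE mystmt INTO :cnt USING :threshold;
    EXEC SQL DEALLOCATE PREPARE mystmt;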
@ -628,7 +628,7 @@ EXEC SQL DEALLOCATE PREPARE <replaceable>name</replaceable>;
<title>Using Host Variables</title>
<para>
In <xref linkend="ecpg-commands"> you saw how you can execute SQL
In <xref linkend="ecpg-commands"/> you saw how you can execute SQL
statements from an embedded SQL program. Some of those statements
only used fixed values and did not provide a way to insert
user-supplied values into statements or have the program process
@ -646,7 +646,7 @@ EXEC SQL DEALLOCATE PREPARE <replaceable>name</replaceable>;
<para>
Another way to exchange values between PostgreSQL backends and ECPG
applications is the use of SQL descriptors, described
in <xref linkend="ecpg-descriptors">.
in <xref linkend="ecpg-descriptors"/>.
</para>
<sect2 id="ecpg-variables-overview">
@ -812,11 +812,11 @@ do
directly. Other PostgreSQL data types, such
as <type>timestamp</type> and <type>numeric</type> can only be
accessed through special library functions; see
<xref linkend="ecpg-special-types">.
<xref linkend="ecpg-special-types"/>.
</para>
<para>
<xref linkend="ecpg-datatype-hostvars-table"> shows which PostgreSQL
<xref linkend="ecpg-datatype-hostvars-table"/> shows which PostgreSQL
data types correspond to which C data types. When you wish to
send or receive a value of a given PostgreSQL data type, you
should declare a C variable of the corresponding C data type in
@ -851,12 +851,12 @@ do
<row>
<entry><type>decimal</type></entry>
<entry><type>decimal</type><footnote id="ecpg-datatype-table-fn"><para>This type can only be accessed through special library functions; see <xref linkend="ecpg-special-types">.</para></footnote></entry>
<entry><type>decimal</type><footnote id="ecpg-datatype-table-fn"><para>This type can only be accessed through special library functions; see <xref linkend="ecpg-special-types"/>.</para></footnote></entry>
</row>
<row>
<entry><type>numeric</type></entry>
<entry><type>numeric</type><footnoteref linkend="ecpg-datatype-table-fn"></entry>
<entry><type>numeric</type><footnoteref linkend="ecpg-datatype-table-fn"/></entry>
</row>
<row>
@ -901,17 +901,17 @@ do
<row>
<entry><type>timestamp</type></entry>
<entry><type>timestamp</type><footnoteref linkend="ecpg-datatype-table-fn"></entry>
<entry><type>timestamp</type><footnoteref linkend="ecpg-datatype-table-fn"/></entry>
</row>
<row>
<entry><type>interval</type></entry>
<entry><type>interval</type><footnoteref linkend="ecpg-datatype-table-fn"></entry>
<entry><type>interval</type><footnoteref linkend="ecpg-datatype-table-fn"/></entry>
</row>
<row>
<entry><type>date</type></entry>
<entry><type>date</type><footnoteref linkend="ecpg-datatype-table-fn"></entry>
<entry><type>date</type><footnoteref linkend="ecpg-datatype-table-fn"/></entry>
</row>
<row>
@ -1002,7 +1002,7 @@ struct varchar_var { int len; char arr[180]; } var;
structure. Applications deal with these types by declaring host
variables in special types and accessing them using functions in
the pgtypes library. The pgtypes library, described in detail
in <xref linkend="ecpg-pgtypes"> contains basic functions to deal
in <xref linkend="ecpg-pgtypes"/> contains basic functions to deal
with those types, such that you do not need to send a query to
the SQL server just for adding an interval to a time stamp for
example.
@ -1011,7 +1011,7 @@ struct varchar_var { int len; char arr[180]; } var;
<para>
The following subsections describe these special data types. For
more details about pgtypes library functions,
see <xref linkend="ecpg-pgtypes">.
see <xref linkend="ecpg-pgtypes"/>.
</para>
<sect4>
@ -1062,7 +1062,7 @@ ts = 2010-06-27 18:03:56.949343
program has to include <filename>pgtypes_date.h</filename>, declare a host variable
as the date type and convert a DATE value into a text form using
<function>PGTYPESdate_to_asc()</function> function. For more details about the
pgtypes library functions, see <xref linkend="ecpg-pgtypes">.
pgtypes library functions, see <xref linkend="ecpg-pgtypes"/>.
</para>
</sect4>
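A small standalone sketch (not from the patch) of the date round trip described above, assuming the pgtypes headers and library that ship with ecpg; the literal date is arbitrary.

    #include <stdio.h>
    #include <stdlib.h>
    #include <pgtypes_date.h>

    int
    main(void)
    {
        char  dstr[] = "2017-11-23";
        char *endptr;
        date  d    = PGTYPESdate_from_asc(dstr, &endptr);   /* text -> date */
        char *text = PGTYPESdate_to_asc(d);                 /* date -> text */

        printf("date = %s\n", text);
        free(text);               /* the result string is allocated by the library */
        return 0;
    }

Such a program needs no ECPG preprocessing and would typically be linked with -lpgtypes.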
@ -1117,7 +1117,7 @@ EXEC SQL END DECLARE SECTION;
allocating some memory space on the heap, and accessing the
variable using the pgtypes library functions. For more details
about the pgtypes library functions,
see <xref linkend="ecpg-pgtypes">.
see <xref linkend="ecpg-pgtypes"/>.
</para>
<para>
@ -1193,7 +1193,7 @@ EXEC SQL END DECLARE SECTION;
There are two use cases for arrays as host variables. The first
is a way to store some text string in <type>char[]</type>
or <type>VARCHAR[]</type>, as
explained in <xref linkend="ecpg-char">. The second use case is to
explained in <xref linkend="ecpg-char"/>. The second use case is to
retrieve multiple rows from a query result without using a
cursor. Without an array, to process a query result consisting
of multiple rows, it is required to use a cursor and
@ -1378,7 +1378,7 @@ EXEC SQL TYPE serial_t IS long;
<para>
You can declare pointers to the most common types. Note however
that you cannot use pointers as target variables of queries
without auto-allocation. See <xref linkend="ecpg-descriptors">
without auto-allocation. See <xref linkend="ecpg-descriptors"/>
for more information on auto-allocation.
</para>
@ -1520,7 +1520,7 @@ while (1)
Another workaround is to store arrays in their external string
representation in host variables of type <type>char[]</type>
or <type>VARCHAR[]</type>. For more details about this
representation, see <xref linkend="arrays-input">. Note that
representation, see <xref linkend="arrays-input"/>. Note that
this means that the array cannot be accessed naturally as an
array in the host program (without further processing that parses
the text representation).
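To make that workaround concrete, a hedged ECPG fragment (not from the patch); the array value is constructed inline so no table is needed.

    EXEC SQL BEGIN DECLARE SECTION;
    char arrtext[64];
    EXEC SQL END DECLARE SECTION;

    EXEC SQL SELECT ARRAY[1,2,3] INTO :arrtext;
    /* arrtext now holds the external representation, e.g. "{1,2,3}" */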
@ -1578,7 +1578,7 @@ EXEC SQL CLOSE cur1;
To enhance this example, the host variables to store values in
the <command>FETCH</command> command can be gathered into one
structure. For more details about the host variable in the
structure form, see <xref linkend="ecpg-variables-struct">.
structure form, see <xref linkend="ecpg-variables-struct"/>.
To switch to the structure, the example can be modified as below.
The two host variables, <varname>intval</varname>
and <varname>textval</varname>, become members of
@ -1659,12 +1659,12 @@ while (1)
<para>
Here is an example using the data type <type>complex</type> from
the example in <xref linkend="xtypes">. The external string
the example in <xref linkend="xtypes"/>. The external string
representation of that type is <literal>(%lf,%lf)</literal>,
which is defined in the
functions <function>complex_in()</function>
and <function>complex_out()</function> functions
in <xref linkend="xtypes">. The following example inserts the
in <xref linkend="xtypes"/>. The following example inserts the
complex type values <literal>(1,1)</literal>
and <literal>(3,3)</literal> into the
columns <literal>a</literal> and <literal>b</literal>, and select
@ -1875,7 +1875,7 @@ EXEC SQL EXECUTE mystmt INTO :v1, :v2, :v3 USING 37;
<para>
If a query is expected to return more than one result row, a
cursor should be used, as in the following example.
(See <xref linkend="ecpg-cursors"> for more details about the
(See <xref linkend="ecpg-cursors"/> for more details about the
cursor.)
<programlisting>
EXEC SQL BEGIN DECLARE SECTION;
@ -1941,7 +1941,7 @@ free(out);
<title>The numeric Type</title>
<para>
The numeric type allows calculations with arbitrary precision. See
<xref linkend="datatype-numeric"> for the equivalent type in the
<xref linkend="datatype-numeric"/> for the equivalent type in the
<productname>PostgreSQL</productname> server. Because of the arbitrary precision this
variable needs to be able to expand and shrink dynamically. That's why you
can only create numeric variables on the heap, by means of the
@ -2264,7 +2264,7 @@ int PGTYPESnumeric_from_decimal(decimal *src, numeric *dst);
<title>The date Type</title>
<para>
The date type in C enables your programs to deal with data of the SQL type
date. See <xref linkend="datatype-datetime"> for the equivalent type in the
date. See <xref linkend="datatype-datetime"/> for the equivalent type in the
<productname>PostgreSQL</productname> server.
</para>
<para>
@ -2303,7 +2303,7 @@ date PGTYPESdate_from_asc(char *str, char **endptr);
currently no variable to change that within ECPG.
</para>
<para>
<xref linkend="ecpg-pgtypesdate-from-asc-table"> shows the allowed input formats.
<xref linkend="ecpg-pgtypesdate-from-asc-table"/> shows the allowed input formats.
</para>
<table id="ecpg-pgtypesdate-from-asc-table">
<title>Valid Input Formats for <function>PGTYPESdate_from_asc</function></title>
@ -2558,7 +2558,7 @@ int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf);
All other characters are copied 1:1 to the output string.
</para>
<para>
<xref linkend="ecpg-pgtypesdate-fmt-asc-example-table"> indicates a few possible formats. This will give
<xref linkend="ecpg-pgtypesdate-fmt-asc-example-table"/> indicates a few possible formats. This will give
you an idea of how to use this function. All output lines are based on
the same date: November 23, 1959.
</para>
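A hedged, self-contained sketch (not from the patch) exercising PGTYPESdate_fmt_asc with one of the format masks from the table referenced above; the buffer sizes are guesses.

    #include <stdio.h>
    #include <pgtypes_date.h>

    int
    main(void)
    {
        date d;
        int  mdy[3] = {11, 23, 1959};          /* month, day, year          */
        char fmt[]  = "mmm. dd, yyyy";
        char out[64];

        PGTYPESdate_mdyjul(mdy, &d);           /* assemble the date value   */
        PGTYPESdate_fmt_asc(d, fmt, out);      /* format it into the buffer */
        printf("%s\n", out);                   /* expected: Nov. 23, 1959   */
        return 0;
    }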
@ -2649,7 +2649,7 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str);
day.
</para>
<para>
<xref linkend="ecpg-rdefmtdate-example-table"> indicates a few possible formats. This will give
<xref linkend="ecpg-rdefmtdate-example-table"/> indicates a few possible formats. This will give
you an idea of how to use this function.
</para>
<table id="ecpg-rdefmtdate-example-table">
@ -2741,7 +2741,7 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str);
<title>The timestamp Type</title>
<para>
The timestamp type in C enables your programs to deal with data of the SQL
type timestamp. See <xref linkend="datatype-datetime"> for the equivalent
type timestamp. See <xref linkend="datatype-datetime"/> for the equivalent
type in the <productname>PostgreSQL</productname> server.
</para>
<para>
@ -2766,7 +2766,7 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr);
<para>
The function returns the parsed timestamp on success. On error,
<literal>PGTYPESInvalidTimestamp</literal> is returned and <varname>errno</varname> is
set to <literal>PGTYPES_TS_BAD_TIMESTAMP</literal>. See <xref linkend="pgtypesinvalidtimestamp"> for important notes on this value.
set to <literal>PGTYPES_TS_BAD_TIMESTAMP</literal>. See <xref linkend="pgtypesinvalidtimestamp"/> for important notes on this value.
</para>
<para>
In general, the input string can contain any combination of an allowed
@ -2777,7 +2777,7 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr);
specifiers are silently discarded.
</para>
<para>
<xref linkend="ecpg-pgtypestimestamp-from-asc-example-table"> contains a few examples for input strings.
<xref linkend="ecpg-pgtypestimestamp-from-asc-example-table"/> contains a few examples for input strings.
</para>
<table id="ecpg-pgtypestimestamp-from-asc-example-table">
<title>Valid Input Formats for <function>PGTYPEStimestamp_from_asc</function></title>
@ -3217,7 +3217,7 @@ int PGTYPEStimestamp_defmt_asc(char *str, char *fmt, timestamp *d);
</para>
<para>
This is the reverse function to <xref
linkend="pgtypestimestampfmtasc">. See the documentation there in
linkend="pgtypestimestampfmtasc"/>. See the documentation there in
order to find out about the possible formatting mask entries.
</para>
</listitem>
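Not from the patch: a hedged sketch combining the timestamp parsing and formatting functions whose references are retagged in this block. The input string and format mask are arbitrary.

    #include <stdio.h>
    #include <pgtypes_timestamp.h>

    int
    main(void)
    {
        char  in[]  = "2017-11-23 09:39:47";
        char  fmt[] = "%d.%m.%Y %H:%M";
        char  out[64];
        char *endptr;
        timestamp ts = PGTYPEStimestamp_from_asc(in, &endptr);   /* parse  */

        PGTYPEStimestamp_fmt_asc(&ts, out, sizeof(out), fmt);    /* format */
        printf("%s\n", out);                 /* expected: 23.11.2017 09:39 */
        return 0;
    }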
@ -3270,7 +3270,7 @@ int PGTYPEStimestamp_sub_interval(timestamp *tin, interval *span, timestamp *tou
<title>The interval Type</title>
<para>
The interval type in C enables your programs to deal with data of the SQL
type interval. See <xref linkend="datatype-datetime"> for the equivalent
type interval. See <xref linkend="datatype-datetime"/> for the equivalent
type in the <productname>PostgreSQL</productname> server.
</para>
<para>
@ -3364,7 +3364,7 @@ int PGTYPESinterval_copy(interval *intvlsrc, interval *intvldest);
<function>PGTYPESdecimal_free</function>).
There are a lot of other functions that deal with the decimal type in the
<productname>Informix</productname> compatibility mode described in <xref
linkend="ecpg-informix-compat">.
linkend="ecpg-informix-compat"/>.
</para>
<para>
The following functions can be used to work with the decimal type and are
@ -3632,7 +3632,7 @@ EXEC SQL DESCRIBE stmt1 INTO SQL DESCRIPTOR mydesc;
so using <literal>DESCRIPTOR</literal> and <literal>SQL DESCRIPTOR</literal>
produced named SQL Descriptor Areas. Now it is mandatory: omitting
the <literal>SQL</literal> keyword produces SQLDA Descriptor Areas;
see <xref linkend="ecpg-sqlda-descriptors">.
see <xref linkend="ecpg-sqlda-descriptors"/>.
</para>
<para>
@ -3853,7 +3853,7 @@ EXEC SQL FETCH 3 FROM mycursor INTO DESCRIPTOR mysqlda;
</programlisting>
Note that the <literal>SQL</literal> keyword is omitted. The paragraphs about
the use cases of the <literal>INTO</literal> and <literal>USING</literal>
keywords in <xref linkend="ecpg-named-descriptors"> also apply here with an addition.
keywords in <xref linkend="ecpg-named-descriptors"/> also apply here with an addition.
In a <command>DESCRIBE</command> statement the <literal>DESCRIPTOR</literal>
keyword can be completely omitted if the <literal>INTO</literal> keyword is used:
<programlisting>
@ -4038,7 +4038,7 @@ typedef struct sqlvar_struct sqlvar_t;
<listitem>
<para>
Points to the data. The format of the data is described
in <xref linkend="ecpg-variables-type-mapping">.
in <xref linkend="ecpg-variables-type-mapping"/>.
</para>
</listitem>
</varlistentry>
@ -4447,7 +4447,7 @@ main(void)
<para>
The whole program is shown
in <xref linkend="ecpg-sqlda-example-example">.
in <xref linkend="ecpg-sqlda-example-example"/>.
</para>
<example id="ecpg-sqlda-example-example">
@ -5016,7 +5016,7 @@ sqlstate: 42P01
<literal>SQLSTATE</literal> error codes; therefore a high degree
of consistency can be achieved by using this error code scheme
throughout all applications. For further information see
<xref linkend="errcodes-appendix">.
<xref linkend="errcodes-appendix"/>.
</para>
<para>
@ -5037,7 +5037,7 @@ sqlstate: 42P01
<literal>SQLSTATE</literal> is also listed. There is, however, no
one-to-one or one-to-many mapping between the two schemes (indeed
it is many-to-many), so you should consult the global
<literal>SQLSTATE</literal> listing in <xref linkend="errcodes-appendix">
<literal>SQLSTATE</literal> listing in <xref linkend="errcodes-appendix"/>
in each case.
</para>
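For illustration only (not from the patch): checking sqlca.sqlstate directly in an ECPG program. The failing query and the tested error class (42P01, undefined_table) are made up.

    #include <stdio.h>
    #include <string.h>

    EXEC SQL INCLUDE sqlca;

    EXEC SQL BEGIN DECLARE SECTION;
    int one;
    EXEC SQL END DECLARE SECTION;

    EXEC SQL WHENEVER SQLERROR CONTINUE;                 /* inspect sqlca ourselves    */
    EXEC SQL SELECT 1 INTO :one FROM no_such_table;      /* hypothetical failing query */

    /* sqlca.sqlstate is five characters with no terminating zero byte */
    if (strncmp(sqlca.sqlstate, "42P01", 5) == 0)
        fprintf(stderr, "table does not exist\n");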
@ -5767,7 +5767,7 @@ ECPG = ecpg
<para>
The complete syntax of the <command>ecpg</command> command is
detailed in <xref linkend="app-ecpg">.
detailed in <xref linkend="app-ecpg"/>.
</para>
<para>
@ -5835,7 +5835,7 @@ ECPG = ecpg
<para>
<function>ECPGtransactionStatus(const char *<replaceable>connection_name</replaceable>)</function>
returns the current transaction status of the given connection identified by <replaceable>connection_name</replaceable>.
See <xref linkend="libpq-status"> and libpq's <function>PQtransactionStatus()</function> for details about the returned status codes.
See <xref linkend="libpq-status"/> and libpq's <function>PQtransactionStatus()</function> for details about the returned status codes.
</para>
</listitem>
@ -5867,8 +5867,8 @@ ECPG = ecpg
<para>
For more details about the <function>ECPGget_PGconn()</function>, see
<xref linkend="ecpg-library">. For information about the large
object function interface, see <xref linkend="largeobjects">.
<xref linkend="ecpg-library"/>. For information about the large
object function interface, see <xref linkend="largeobjects"/>.
</para>
<para>
@ -5878,7 +5878,7 @@ ECPG = ecpg
</para>
<para>
<xref linkend="ecpg-lo-example"> shows an example program that
<xref linkend="ecpg-lo-example"/> shows an example program that
illustrates how to create, write, and read a large object in an
ECPG application.
</para>
@ -5997,7 +5997,7 @@ main(void)
A safe way to use the embedded SQL code in a C++ application is
hiding the ECPG calls in a C module, which the C++ application code
calls into to access the database, and linking that together with
the rest of the C++ code. See <xref linkend="ecpg-cpp-and-c">
the rest of the C++ code. See <xref linkend="ecpg-cpp-and-c"/>
about that.
</para>
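One plausible shape for such a C module's interface, offered as a hedged sketch (not from the patch); the file and function names are hypothetical.

    /* db_access.h -- C interface to an ECPG module, usable from C++ */
    #ifndef DB_ACCESS_H
    #define DB_ACCESS_H

    #ifdef __cplusplus
    extern "C" {
    #endif

    void db_connect(void);        /* EXEC SQL CONNECT ...    in the .pgc file */
    void db_insert(int value);    /* EXEC SQL INSERT ...     in the .pgc file */
    void db_disconnect(void);     /* EXEC SQL DISCONNECT ... in the .pgc file */

    #ifdef __cplusplus
    }
    #endif

    #endif   /* DB_ACCESS_H */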
@ -6252,7 +6252,7 @@ c++ test_cpp.o test_mod.o -lecpg -o test_cpp
<para>
This section describes all SQL commands that are specific to
embedded SQL. Also refer to the SQL commands listed
in <xref linkend="sql-commands">, which can also be used in
in <xref linkend="sql-commands"/>, which can also be used in
embedded SQL, unless stated otherwise.
</para>
@ -6320,9 +6320,9 @@ EXEC SQL ALLOCATE DESCRIPTOR mydesc;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-deallocate-descriptor"></member>
<member><xref linkend="ecpg-sql-get-descriptor"></member>
<member><xref linkend="ecpg-sql-set-descriptor"></member>
<member><xref linkend="ecpg-sql-deallocate-descriptor"/></member>
<member><xref linkend="ecpg-sql-get-descriptor"/></member>
<member><xref linkend="ecpg-sql-set-descriptor"/></member>
</simplelist>
</refsect1>
</refentry>
@ -6539,8 +6539,8 @@ EXEC SQL END DECLARE SECTION;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-disconnect"></member>
<member><xref linkend="ecpg-sql-set-connection"></member>
<member><xref linkend="ecpg-sql-disconnect"/></member>
<member><xref linkend="ecpg-sql-set-connection"/></member>
</simplelist>
</refsect1>
</refentry>
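For context (not part of the patch): a hedged sketch of the connection statements these refentries cover. Database, user, and connection names are placeholders.

    EXEC SQL CONNECT TO testdb AS con1 USER testuser;   /* open a named connection   */
    EXEC SQL CONNECT TO testdb AS con2 USER testuser;   /* and a second one          */

    EXEC SQL SET CONNECTION = con1;                     /* make con1 the current one */
    /* ... statements issued here run on con1 ... */

    EXEC SQL DISCONNECT ALL;                            /* close both connections    */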
@ -6604,9 +6604,9 @@ EXEC SQL DEALLOCATE DESCRIPTOR mydesc;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-allocate-descriptor"></member>
<member><xref linkend="ecpg-sql-get-descriptor"></member>
<member><xref linkend="ecpg-sql-set-descriptor"></member>
<member><xref linkend="ecpg-sql-allocate-descriptor"/></member>
<member><xref linkend="ecpg-sql-get-descriptor"/></member>
<member><xref linkend="ecpg-sql-set-descriptor"/></member>
</simplelist>
</refsect1>
</refentry>
@ -6668,8 +6668,8 @@ DECLARE <replaceable class="parameter">cursor_name</replaceable> [ BINARY ] [ IN
<term><replaceable class="parameter">query</replaceable></term>
<listitem>
<para>
A <xref linkend="sql-select"> or
<xref linkend="sql-values"> command which will provide the
A <xref linkend="sql-select"/> or
<xref linkend="sql-values"/> command which will provide the
rows to be returned by the cursor.
</para>
</listitem>
@ -6678,7 +6678,7 @@ DECLARE <replaceable class="parameter">cursor_name</replaceable> [ BINARY ] [ IN
<para>
For the meaning of the cursor options,
see <xref linkend="sql-declare">.
see <xref linkend="sql-declare"/>.
</para>
</refsect1>
@ -6715,9 +6715,9 @@ EXEC SQL DECLARE cur1 CURSOR FOR stmt1;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-open"></member>
<member><xref linkend="sql-close"></member>
<member><xref linkend="sql-declare"></member>
<member><xref linkend="ecpg-sql-open"/></member>
<member><xref linkend="sql-close"/></member>
<member><xref linkend="sql-declare"/></member>
</simplelist>
</refsect1>
</refentry>
@ -6805,8 +6805,8 @@ EXEC SQL DEALLOCATE DESCRIPTOR mydesc;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-allocate-descriptor"></member>
<member><xref linkend="ecpg-sql-get-descriptor"></member>
<member><xref linkend="ecpg-sql-allocate-descriptor"/></member>
<member><xref linkend="ecpg-sql-get-descriptor"/></member>
</simplelist>
</refsect1>
</refentry>
@ -6915,8 +6915,8 @@ main(void)
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-connect"></member>
<member><xref linkend="ecpg-sql-set-connection"></member>
<member><xref linkend="ecpg-sql-connect"/></member>
<member><xref linkend="ecpg-sql-set-connection"/></member>
</simplelist>
</refsect1>
</refentry>
@ -7056,7 +7056,7 @@ GET DESCRIPTOR <replaceable class="parameter">descriptor_name</replaceable> VALU
<listitem>
<para>
A token identifying which item of information about a column
to retrieve. See <xref linkend="ecpg-named-descriptors"> for
to retrieve. See <xref linkend="ecpg-named-descriptors"/> for
a list of supported items.
</para>
</listitem>
@ -7164,8 +7164,8 @@ d_data = testdb
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-allocate-descriptor"></member>
<member><xref linkend="ecpg-sql-set-descriptor"></member>
<member><xref linkend="ecpg-sql-allocate-descriptor"/></member>
<member><xref linkend="ecpg-sql-set-descriptor"/></member>
</simplelist>
</refsect1>
</refentry>
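A hedged end-to-end sketch (not from the patch) of the named-descriptor lifecycle these refentries describe: allocate, fill from a FETCH, read one item, deallocate. The query is deliberately trivial.

    #include <stdio.h>

    EXEC SQL BEGIN DECLARE SECTION;
    char d_data[64];
    EXEC SQL END DECLARE SECTION;

    EXEC SQL ALLOCATE DESCRIPTOR mydesc;

    EXEC SQL DECLARE cur CURSOR FOR SELECT current_database();
    EXEC SQL OPEN cur;
    EXEC SQL FETCH NEXT FROM cur INTO SQL DESCRIPTOR mydesc;

    EXEC SQL GET DESCRIPTOR mydesc VALUE 1 :d_data = DATA;   /* first output column */
    printf("d_data = %s\n", d_data);

    EXEC SQL CLOSE cur;
    EXEC SQL DEALLOCATE DESCRIPTOR mydesc;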
@ -7258,8 +7258,8 @@ EXEC SQL OPEN :curname1;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-declare"></member>
<member><xref linkend="sql-close"></member>
<member><xref linkend="ecpg-sql-declare"/></member>
<member><xref linkend="sql-close"/></member>
</simplelist>
</refsect1>
</refentry>
@ -7282,8 +7282,8 @@ PREPARE <replaceable class="parameter">name</replaceable> FROM <replaceable clas
<para>
<command>PREPARE</command> prepares a statement dynamically
specified as a string for execution. This is different from the
direct SQL statement <xref linkend="sql-prepare">, which can also
be used in embedded programs. The <xref linkend="sql-execute">
direct SQL statement <xref linkend="sql-prepare"/>, which can also
be used in embedded programs. The <xref linkend="sql-execute"/>
command is used to execute either kind of prepared statement.
</para>
</refsect1>
@ -7338,7 +7338,7 @@ EXEC SQL EXECUTE foo USING SQL DESCRIPTOR indesc INTO SQL DESCRIPTOR outdesc;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-execute"></member>
<member><xref linkend="sql-execute"/></member>
</simplelist>
</refsect1>
</refentry>
@ -7445,8 +7445,8 @@ EXEC SQL SET CONNECTION = con1;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-connect"></member>
<member><xref linkend="ecpg-sql-disconnect"></member>
<member><xref linkend="ecpg-sql-connect"/></member>
<member><xref linkend="ecpg-sql-disconnect"/></member>
</simplelist>
</refsect1>
</refentry>
@ -7520,7 +7520,7 @@ SET DESCRIPTOR <replaceable class="parameter">descriptor_name</replaceable> VALU
<listitem>
<para>
A token identifying which item of information to set in the
descriptor. See <xref linkend="ecpg-named-descriptors"> for a
descriptor. See <xref linkend="ecpg-named-descriptors"/> for a
list of supported items.
</para>
</listitem>
@ -7561,8 +7561,8 @@ EXEC SQL SET DESCRIPTOR indesc VALUE 2 INDICATOR = :val2null, DATA = :val2;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="ecpg-sql-allocate-descriptor"></member>
<member><xref linkend="ecpg-sql-get-descriptor"></member>
<member><xref linkend="ecpg-sql-allocate-descriptor"/></member>
<member><xref linkend="ecpg-sql-get-descriptor"/></member>
</simplelist>
</refsect1>
</refentry>
@ -7796,7 +7796,7 @@ WHENEVER { NOT FOUND | SQLERROR | SQLWARNING } <replaceable class="parameter">ac
<title>Parameters</title>
<para>
See <xref linkend="ecpg-whenever"> for a description of the
See <xref linkend="ecpg-whenever"/> for a description of the
parameters.
</para>
</refsect1>
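For reference (not from the patch), a few typical WHENEVER declarations illustrating the parameters described above:

    EXEC SQL WHENEVER SQLWARNING SQLPRINT;   /* print a message on warnings             */
    EXEC SQL WHENEVER SQLERROR STOP;         /* terminate the program on errors         */
    EXEC SQL WHENEVER NOT FOUND DO BREAK;    /* leave the enclosing loop when a FETCH   */
                                             /* or SELECT ... INTO finds no (more) rows */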
@ -7979,7 +7979,7 @@ EXEC SQL CLOSE DATABASE;
<title>Informix-compatible SQLDA Descriptor Areas</title>
<para>
Informix-compatible mode supports a different structure than the one described in
<xref linkend="ecpg-sqlda-descriptors">. See below:
<xref linkend="ecpg-sqlda-descriptors"/>. See below:
<programlisting>
struct sqlvar_compat
{
@ -8653,7 +8653,7 @@ void rtoday(date *d);
that it sets to the current date.
</para>
<para>
Internally this function uses the <xref linkend="pgtypesdatetoday">
Internally this function uses the <xref linkend="pgtypesdatetoday"/>
function.
</para>
</listitem>
@ -8678,7 +8678,7 @@ int rjulmdy(date d, short mdy[3]);
The function always returns 0 at the moment.
</para>
<para>
Internally the function uses the <xref linkend="pgtypesdatejulmdy">
Internally the function uses the <xref linkend="pgtypesdatejulmdy"/>
function.
</para>
</listitem>
@ -8748,7 +8748,7 @@ int rdefmtdate(date *d, char *fmt, char *str);
</para>
<para>
Internally this function is implemented to use the <xref
linkend="pgtypesdatedefmtasc"> function. See the reference there for a
linkend="pgtypesdatedefmtasc"/> function. See the reference there for a
table of example input.
</para>
</listitem>
@ -8771,7 +8771,7 @@ int rfmtdate(date d, char *fmt, char *str);
On success, 0 is returned; a negative value is returned if an error occurred.
</para>
<para>
Internally this function uses the <xref linkend="pgtypesdatefmtasc">
Internally this function uses the <xref linkend="pgtypesdatefmtasc"/>
function, see the reference there for examples.
</para>
</listitem>
@ -8795,7 +8795,7 @@ int rmdyjul(short mdy[3], date *d);
</para>
<para>
Internally the function is implemented to use the function <xref
linkend="pgtypesdatemdyjul">.
linkend="pgtypesdatemdyjul"/>.
</para>
</listitem>
</varlistentry>
@ -8851,7 +8851,7 @@ int rdayofweek(date d);
</para>
<para>
Internally the function is implemented to use the function <xref
linkend="pgtypesdatedayofweek">.
linkend="pgtypesdatedayofweek"/>.
</para>
</listitem>
</varlistentry>
@ -8889,7 +8889,7 @@ int dtcvasc(char *str, timestamp *ts);
</para>
<para>
Internally this function uses the <xref
linkend="pgtypestimestampfromasc"> function. See the reference there
linkend="pgtypestimestampfromasc"/> function. See the reference there
for a table with example inputs.
</para>
</listitem>
@ -8911,7 +8911,7 @@ dtcvfmtasc(char *inbuf, char *fmtstr, timestamp *dtvalue)
</para>
<para>
This function is implemented by means of the <xref
linkend="pgtypestimestampdefmtasc"> function. See the documentation
linkend="pgtypestimestampdefmtasc"/> function. See the documentation
there for a list of format specifiers that can be used.
</para>
<para>
@ -8983,7 +8983,7 @@ int dttofmtasc(timestamp *ts, char *output, int str_len, char *fmtstr);
</para>
<para>
Internally, this function uses the <xref
linkend="pgtypestimestampfmtasc"> function. See the reference there for
linkend="pgtypestimestampfmtasc"/> function. See the reference there for
information on what format mask specifiers can be used.
</para>
</listitem>
@ -9289,7 +9289,7 @@ int risnull(int t, char *ptr);
The function receives the type of the variable to test (<literal>t</literal>)
as well as a pointer to this variable (<literal>ptr</literal>). Note that the
latter needs to be cast to a char*. See the function <xref
linkend="rsetnull"> for a list of possible variable types.
linkend="rsetnull"/> for a list of possible variable types.
</para>
<para>
Here is an example of how to use this function:

View File

@ -32,7 +32,7 @@
</para>
<para>
<xref linkend="errcodes-table"> lists all the error codes defined in
<xref linkend="errcodes-table"/> lists all the error codes defined in
<productname>PostgreSQL</productname> &version;. (Some are not actually
used at present, but are defined by the SQL standard.)
The error classes are also shown. For each error class there is a
@ -66,9 +66,9 @@
<title><productname>PostgreSQL</productname> Error Codes</title>
<tgroup cols="2">
<colspec colnum="1" colname="errorcode">
<colspec colnum="2" colname="condname">
<spanspec namest="errorcode" nameend="condname" spanname="span12">
<colspec colnum="1" colname="errorcode"/>
<colspec colnum="2" colname="condname"/>
<spanspec namest="errorcode" nameend="condname" spanname="span12"/>
<thead>
<row>

View File

@ -8,7 +8,7 @@
</indexterm>
<para>
To supplement the trigger mechanism discussed in <xref linkend="triggers">,
To supplement the trigger mechanism discussed in <xref linkend="triggers"/>,
<productname>PostgreSQL</productname> also provides event triggers. Unlike regular
triggers, which are attached to a single table and capture only DML events,
event triggers are global to a particular database and are capable of
@ -57,7 +57,7 @@
operations that took place, use the set-returning function
<literal>pg_event_trigger_ddl_commands()</literal> from the
<literal>ddl_command_end</literal> event trigger code (see
<xref linkend="functions-event-triggers">). Note that the trigger fires
<xref linkend="functions-event-triggers"/>). Note that the trigger fires
after the actions have taken place (but before the transaction commits),
and thus the system catalogs can be read as already changed.
</para>
@ -68,7 +68,7 @@
database objects. To list the objects that have been dropped, use the
set-returning function <literal>pg_event_trigger_dropped_objects()</literal> from the
<literal>sql_drop</literal> event trigger code (see
<xref linkend="functions-event-triggers">). Note that
<xref linkend="functions-event-triggers"/>). Note that
the trigger is executed after the objects have been deleted from the
system catalogs, so it's not possible to look them up anymore.
</para>
@ -96,11 +96,11 @@
<para>
For a complete list of commands supported by the event trigger mechanism,
see <xref linkend="event-trigger-matrix">.
see <xref linkend="event-trigger-matrix"/>.
</para>
<para>
Event triggers are created using the command <xref linkend="sql-createeventtrigger">.
Event triggers are created using the command <xref linkend="sql-createeventtrigger"/>.
In order to create an event trigger, you must first create a function with
the special return type <literal>event_trigger</literal>. This function
need not (and may not) return a value; the return type serves merely as
@ -125,7 +125,7 @@
<title>Event Trigger Firing Matrix</title>
<para>
<xref linkend="event-trigger-by-command-tag"> lists all commands
<xref linkend="event-trigger-by-command-tag"/> lists all commands
for which event triggers are supported.
</para>
@ -953,7 +953,7 @@ typedef struct EventTriggerData
Describes the event for which the function is called, one of
<literal>"ddl_command_start"</literal>, <literal>"ddl_command_end"</literal>,
<literal>"sql_drop"</literal>, <literal>"table_rewrite"</literal>.
See <xref linkend="event-trigger-definition"> for the meaning of these
See <xref linkend="event-trigger-definition"/> for the meaning of these
events.
</para>
</listitem>
@ -1003,7 +1003,7 @@ typedef struct EventTriggerData
The event trigger definition associated the function with
the <literal>ddl_command_start</literal> event. The effect is that all DDL
commands (with the exceptions mentioned
in <xref linkend="event-trigger-definition">) are prevented from running.
in <xref linkend="event-trigger-definition"/>) are prevented from running.
</para>
<para>
@ -1037,7 +1037,7 @@ noddl(PG_FUNCTION_ARGS)
</para>
<para>
After you have compiled the source code (see <xref linkend="dfunc">),
After you have compiled the source code (see <xref linkend="dfunc"/>),
declare the function and the triggers:
<programlisting>
CREATE FUNCTION noddl() RETURNS event_trigger

View File

@ -15,32 +15,32 @@
<itemizedlist spacing="compact" mark="bullet">
<listitem>
<para>
functions (starting in <xref linkend="xfunc">)
functions (starting in <xref linkend="xfunc"/>)
</para>
</listitem>
<listitem>
<para>
aggregates (starting in <xref linkend="xaggr">)
aggregates (starting in <xref linkend="xaggr"/>)
</para>
</listitem>
<listitem>
<para>
data types (starting in <xref linkend="xtypes">)
data types (starting in <xref linkend="xtypes"/>)
</para>
</listitem>
<listitem>
<para>
operators (starting in <xref linkend="xoper">)
operators (starting in <xref linkend="xoper"/>)
</para>
</listitem>
<listitem>
<para>
operator classes for indexes (starting in <xref linkend="xindex">)
operator classes for indexes (starting in <xref linkend="xindex"/>)
</para>
</listitem>
<listitem>
<para>
packages of related objects (starting in <xref linkend="extend-extensions">)
packages of related objects (starting in <xref linkend="extend-extensions"/>)
</para>
</listitem>
</itemizedlist>
@ -132,14 +132,14 @@
types through functions provided by the user and only understands
the behavior of such types to the extent that the user describes
them.
The built-in base types are described in <xref linkend="datatype">.
The built-in base types are described in <xref linkend="datatype"/>.
</para>
<para>
Enumerated (enum) types can be considered as a subcategory of base
types. The main difference is that they can be created using
just <acronym>SQL</acronym> commands, without any low-level programming.
Refer to <xref linkend="datatype-enum"> for more information.
Refer to <xref linkend="datatype-enum"/> for more information.
</para>
</sect2>
@ -157,25 +157,25 @@
type is automatically created for each base type, composite type, range
type, and domain type. But there are no arrays of arrays. So far as
the type system is concerned, multi-dimensional arrays are the same as
one-dimensional arrays. Refer to <xref linkend="arrays"> for more
one-dimensional arrays. Refer to <xref linkend="arrays"/> for more
information.
</para>
<para>
Composite types, or row types, are created whenever the user
creates a table. It is also possible to use <xref
linkend="sql-createtype"> to
linkend="sql-createtype"/> to
define a <quote>stand-alone</quote> composite type with no associated
table. A composite type is simply a list of types with
associated field names. A value of a composite type is a row or
record of field values. Refer to <xref linkend="rowtypes">
record of field values. Refer to <xref linkend="rowtypes"/>
for more information.
</para>
<para>
A range type can hold two values of the same type, which are the lower
and upper bounds of the range. Range types are user-created, although
a few built-in ones exist. Refer to <xref linkend="rangetypes">
a few built-in ones exist. Refer to <xref linkend="rangetypes"/>
for more information.
</para>
</sect2>
@ -188,8 +188,8 @@
is interchangeable with its underlying type. However, a domain can have
constraints that restrict its valid values to a subset of what the
underlying type would allow. Domains are created using
the <acronym>SQL</acronym> command <xref linkend="sql-createdomain">.
Refer to <xref linkend="domains"> for more information.
the <acronym>SQL</acronym> command <xref linkend="sql-createdomain"/>.
Refer to <xref linkend="domains"/> for more information.
</para>
</sect2>
@ -202,7 +202,7 @@
container types, but they can be used to declare the argument and
result types of functions. This provides a mechanism within the
type system to identify special classes of functions. <xref
linkend="datatype-pseudotypes-table"> lists the existing
linkend="datatype-pseudotypes-table"/> lists the existing
pseudo-types.
</para>
</sect2>
@ -300,7 +300,7 @@
<para>
A variadic function (one taking a variable number of arguments, as in
<xref linkend="xfunc-sql-variadic-functions">) can be
<xref linkend="xfunc-sql-variadic-functions"/>) can be
polymorphic: this is accomplished by declaring its last parameter as
<literal>VARIADIC</literal> <type>anyarray</type>. For purposes of argument
matching and determining the actual result type, such a function behaves
@ -337,7 +337,7 @@
of the extension itself. If the extension includes C code, there
will typically also be a shared library file into which the C code
has been built. Once you have these files, a simple
<xref linkend="sql-createextension"> command loads the objects into
<xref linkend="sql-createextension"/> command loads the objects into
your database.
</para>
@ -346,7 +346,7 @@
<acronym>SQL</acronym> script to load a bunch of <quote>loose</quote> objects
into your database, is that <productname>PostgreSQL</productname> will then
understand that the objects of the extension go together. You can
drop all the objects with a single <xref linkend="sql-dropextension">
drop all the objects with a single <xref linkend="sql-dropextension"/>
command (no need to maintain a separate <quote>uninstall</quote> script).
Even more useful, <application>pg_dump</application> knows that it should not
dump the individual member objects of the extension &mdash; it will
@ -366,7 +366,7 @@
by <application>pg_dump</application>. Such a change is usually only sensible if
you concurrently make the same change in the extension's script file.
(But there are special provisions for tables containing configuration
data; see <xref linkend="extend-extensions-config-tables">.)
data; see <xref linkend="extend-extensions-config-tables"/>.)
In production situations, it's generally better to create an extension
update script to perform changes to extension member objects.
</para>
@ -405,7 +405,7 @@
<para>
The kinds of SQL objects that can be members of an extension are shown in
the description of <xref linkend="sql-alterextension">. Notably, objects
the description of <xref linkend="sql-alterextension"/>. Notably, objects
that are database-cluster-wide, such as databases, roles, and tablespaces,
cannot be extension members since an extension is only known within one
database. (Although an extension script is not prohibited from creating
@ -438,7 +438,7 @@
</indexterm>
<para>
The <xref linkend="sql-createextension"> command relies on a control
The <xref linkend="sql-createextension"/> command relies on a control
file for each extension, which must be named the same as the extension
with a suffix of <literal>.control</literal>, and must be placed in the
installation's <literal>SHAREDIR/extension</literal> directory. There
@ -499,7 +499,7 @@
when initially creating an extension, but not during extension updates
(since that might override user-added comments). Alternatively,
the extension's comment can be set by writing
a <xref linkend="sql-comment"> command in the script file.
a <xref linkend="sql-comment"/> command in the script file.
</para>
</listitem>
</varlistentry>
@ -562,7 +562,7 @@
its contained objects into a different schema after initial creation
of the extension. The default is <literal>false</literal>, i.e. the
extension is not relocatable.
See <xref linkend="extend-extensions-relocation"> for more information.
See <xref linkend="extend-extensions-relocation"/> for more information.
</para>
</listitem>
</varlistentry>
@ -576,7 +576,7 @@
and not any other.
The <varname>schema</varname> parameter is consulted only when
initially creating an extension, not during extension updates.
See <xref linkend="extend-extensions-relocation"> for more information.
See <xref linkend="extend-extensions-relocation"/> for more information.
</para>
</listitem>
</varlistentry>
@ -609,7 +609,7 @@
comments) by the extension mechanism. This provision is commonly used
to throw an error if the script file is fed to <application>psql</application>
rather than being loaded via <command>CREATE EXTENSION</command> (see example
script in <xref linkend="extend-extensions-example">).
script in <xref linkend="extend-extensions-example"/>).
Without that, users might accidentally load the
extension's contents as <quote>loose</quote> objects rather than as an
extension, a state of affairs that's a bit tedious to recover from.
@ -687,7 +687,7 @@
<para>
In all cases, the script file will be executed with
<xref linkend="guc-search-path"> initially set to point to the target
<xref linkend="guc-search-path"/> initially set to point to the target
schema; that is, <command>CREATE EXTENSION</command> does the equivalent of
this:
<programlisting>
@ -1031,14 +1031,14 @@ include $(PGXS)
</programlisting>
This makefile relies on <acronym>PGXS</acronym>, which is described
in <xref linkend="extend-pgxs">. The command <literal>make install</literal>
in <xref linkend="extend-pgxs"/>. The command <literal>make install</literal>
will install the control and script files into the correct
directory as reported by <application>pg_config</application>.
</para>
<para>
Once the files are installed, use the
<xref linkend="sql-createextension"> command to load the objects into
<xref linkend="sql-createextension"/> command to load the objects into
any particular database.
</para>
</sect2>

View File

@ -40,7 +40,7 @@
</itemizedlist>
All other language interfaces are external projects and are distributed
separately. <xref linkend="language-interface-table"> includes a list of
separately. <xref linkend="language-interface-table"/> includes a list of
some of these projects. Note that some of these packages might not be
released under the same license as <productname>PostgreSQL</productname>. For more
information on each language interface, including licensing terms, refer to
@ -170,7 +170,7 @@
<para>
In addition, there are a number of procedural languages that are developed
and maintained outside the core <productname>PostgreSQL</productname>
distribution. <xref linkend="pl-language-table"> lists some of these
distribution. <xref linkend="pl-language-table"/> lists some of these
packages. Note that some of these projects might not be released under the same
license as <productname>PostgreSQL</productname>. For more information on each
procedural language, including licensing information, refer to its website
@ -238,7 +238,7 @@
just like features that are built in. The
<filename>contrib/</filename> directory shipped with the source code
contains several extensions, which are described in
<xref linkend="contrib">. Other extensions are developed
<xref linkend="contrib"/>. Other extensions are developed
independently, like <application><ulink
url="http://postgis.net/">PostGIS</ulink></application>. Even
<productname>PostgreSQL</productname> replication solutions can be developed

View File

@ -22,7 +22,7 @@
The foreign data wrappers included in the standard distribution are good
references when trying to write your own. Look into the
<filename>contrib</filename> subdirectory of the source tree.
The <xref linkend="sql-createforeigndatawrapper"> reference page also has
The <xref linkend="sql-createforeigndatawrapper"/> reference page also has
some useful details.
</para>
@ -43,7 +43,7 @@
a validator function. Both functions must be written in a compiled
language such as C, using the version-1 interface.
For details on C language calling conventions and dynamic loading,
see <xref linkend="xfunc-c">.
see <xref linkend="xfunc-c"/>.
</para>
<para>
@ -57,7 +57,7 @@
returning the special pseudo-type <type>fdw_handler</type>. The
callback functions are plain C functions and are not visible or
callable at the SQL level. The callback functions are described in
<xref linkend="fdw-callbacks">.
<xref linkend="fdw-callbacks"/>.
</para>
<para>
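Not from the patch: a minimal C sketch of the general shape of an FDW handler function. The my_* callback names are hypothetical; only the required scan-related callbacks are wired up, and their bodies would live elsewhere in the same module.

    #include "postgres.h"
    #include "fmgr.h"
    #include "foreign/fdwapi.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(my_fdw_handler);

    /* scan-related callbacks, implemented elsewhere by the FDW author */
    static void my_GetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid);
    static void my_GetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid);
    static ForeignScan *my_GetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid,
                                          ForeignPath *best_path, List *tlist, List *scan_clauses,
                                          Plan *outer_plan);
    static void my_BeginForeignScan(ForeignScanState *node, int eflags);
    static TupleTableSlot *my_IterateForeignScan(ForeignScanState *node);
    static void my_ReScanForeignScan(ForeignScanState *node);
    static void my_EndForeignScan(ForeignScanState *node);

    Datum
    my_fdw_handler(PG_FUNCTION_ARGS)
    {
        FdwRoutine *routine = makeNode(FdwRoutine);

        /* required callbacks for scanning a foreign table */
        routine->GetForeignRelSize  = my_GetForeignRelSize;
        routine->GetForeignPaths    = my_GetForeignPaths;
        routine->GetForeignPlan     = my_GetForeignPlan;
        routine->BeginForeignScan   = my_BeginForeignScan;
        routine->IterateForeignScan = my_IterateForeignScan;
        routine->ReScanForeignScan  = my_ReScanForeignScan;
        routine->EndForeignScan     = my_EndForeignScan;

        PG_RETURN_POINTER(routine);
    }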
@ -126,7 +126,7 @@ GetForeignRelSize(PlannerInfo *root,
</para>
<para>
See <xref linkend="fdw-planning"> for additional information.
See <xref linkend="fdw-planning"/> for additional information.
</para>
<para>
@ -157,7 +157,7 @@ GetForeignPaths(PlannerInfo *root,
</para>
<para>
See <xref linkend="fdw-planning"> for additional information.
See <xref linkend="fdw-planning"/> for additional information.
</para>
<para>
@ -193,7 +193,7 @@ GetForeignPlan(PlannerInfo *root,
</para>
<para>
See <xref linkend="fdw-planning"> for additional information.
See <xref linkend="fdw-planning"/> for additional information.
</para>
<para>
@ -341,7 +341,7 @@ GetForeignJoinPaths(PlannerInfo *root,
</para>
<para>
See <xref linkend="fdw-planning"> for additional information.
See <xref linkend="fdw-planning"/> for additional information.
</para>
</sect2>
@ -388,7 +388,7 @@ GetForeignUpperPaths(PlannerInfo *root,
</para>
<para>
See <xref linkend="fdw-planning"> for additional information.
See <xref linkend="fdw-planning"/> for additional information.
</para>
</sect2>
@ -477,7 +477,7 @@ PlanForeignModify(PlannerInfo *root,
</para>
<para>
See <xref linkend="fdw-planning"> for additional information.
See <xref linkend="fdw-planning"/> for additional information.
</para>
<para>
@ -759,7 +759,7 @@ PlanDirectModify(PlannerInfo *root,
</para>
<para>
See <xref linkend="fdw-planning"> for additional information.
See <xref linkend="fdw-planning"/> for additional information.
</para>
<para>
@ -872,7 +872,7 @@ EndDirectModify(ForeignScanState *node);
<para>
If an FDW wishes to support <firstterm>late row locking</firstterm> (as described
in <xref linkend="fdw-row-locking">), it must provide the following
in <xref linkend="fdw-row-locking"/>), it must provide the following
callback functions:
</para>
@ -905,7 +905,7 @@ GetForeignRowMarkType(RangeTblEntry *rte,
</para>
<para>
See <xref linkend="fdw-row-locking"> for more information.
See <xref linkend="fdw-row-locking"/> for more information.
</para>
<para>
@ -964,7 +964,7 @@ RefetchForeignRow(EState *estate,
</para>
<para>
See <xref linkend="fdw-row-locking"> for more information.
See <xref linkend="fdw-row-locking"/> for more information.
</para>
<para>
@ -1093,7 +1093,7 @@ AnalyzeForeignTable(Relation relation,
BlockNumber *totalpages);
</programlisting>
This function is called when <xref linkend="sql-analyze"> is executed on
This function is called when <xref linkend="sql-analyze"/> is executed on
a foreign table. If the FDW can collect statistics for this
foreign table, it should return <literal>true</literal>, and provide a pointer
to a function that will collect sample rows from the table in
@ -1139,10 +1139,10 @@ ImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid);
</programlisting>
Obtain a list of foreign table creation commands. This function is
called when executing <xref linkend="sql-importforeignschema">, and is
called when executing <xref linkend="sql-importforeignschema"/>, and is
passed the parse tree for that statement, as well as the OID of the
foreign server to use. It should return a list of C strings, each of
which must contain a <xref linkend="sql-createforeigntable"> command.
which must contain a <xref linkend="sql-createforeigntable"/> command.
These strings will be parsed and executed by the core server.
</para>
@ -1605,7 +1605,7 @@ GetForeignServerByName(const char *name, bool missing_ok);
<para>
<function>PlanForeignModify</function> and the other callbacks described in
<xref linkend="fdw-callbacks-update"> are designed around the assumption
<xref linkend="fdw-callbacks-update"/> are designed around the assumption
that the foreign relation will be scanned in the usual way and then
individual row updates will be driven by a local <literal>ModifyTable</literal>
plan node. This approach is necessary for the general case where an
@ -1616,7 +1616,7 @@ GetForeignServerByName(const char *name, bool missing_ok);
compete against the <literal>ModifyTable</literal> approach. This approach
could also be used to implement remote <literal>SELECT FOR UPDATE</literal>,
rather than using the row locking callbacks described in
<xref linkend="fdw-callbacks-row-locking">. Keep in mind that a path
<xref linkend="fdw-callbacks-row-locking"/>. Keep in mind that a path
inserted into <literal>UPPERREL_FINAL</literal> is responsible for
implementing <emphasis>all</emphasis> behavior of the query.
</para>
@ -1676,7 +1676,7 @@ GetForeignServerByName(const char *name, bool missing_ok);
By default, <productname>PostgreSQL</productname> ignores locking considerations
when interfacing to FDWs, but an FDW can perform early locking without
any explicit support from the core code. The API functions described
in <xref linkend="fdw-callbacks-row-locking">, which were added
in <xref linkend="fdw-callbacks-row-locking"/>, which were added
in <productname>PostgreSQL</productname> 9.5, allow an FDW to use late locking if
it wishes.
</para>
@ -1720,7 +1720,7 @@ GetForeignServerByName(const char *name, bool missing_ok);
again perform early locking by fetching tuples with the equivalent
of <command>SELECT FOR UPDATE/SHARE</command>. To perform late locking
instead, provide the callback functions defined
in <xref linkend="fdw-callbacks-row-locking">.
in <xref linkend="fdw-callbacks-row-locking"/>.
In <function>GetForeignRowMarkType</function>, select rowmark option
<literal>ROW_MARK_EXCLUSIVE</literal>, <literal>ROW_MARK_NOKEYEXCLUSIVE</literal>,
<literal>ROW_MARK_SHARE</literal>, or <literal>ROW_MARK_KEYSHARE</literal> depending

View File

@ -13,7 +13,7 @@
files in the server's file system, or to execute programs on the server
and read their output. The data file or program output must be in a format
that can be read by <command>COPY FROM</command>;
see <xref linkend="sql-copy"> for details.
see <xref linkend="sql-copy"/> for details.
Access to data files is currently read-only.
</para>

File diff suppressed because it is too large.

View File

@ -237,7 +237,7 @@
choices made during both the initial population selection and subsequent
<quote>mutation</quote> of the best candidates. To avoid surprising changes
of the selected plan, each run of the GEQO algorithm restarts its
random number generator with the current <xref linkend="guc-geqo-seed">
random number generator with the current <xref linkend="guc-geqo-seed"/>
parameter setting. As long as <varname>geqo_seed</varname> and the other
GEQO parameters are kept fixed, the same plan will be generated for a
given query (and other planner inputs such as statistics). To experiment
@ -320,13 +320,13 @@
<listitem>
<para>
<xref linkend="elma04">
<xref linkend="elma04"/>
</para>
</listitem>
<listitem>
<para>
<xref linkend="fong">
<xref linkend="fong"/>
</para>
</listitem>
</itemizedlist>

View File

@ -68,8 +68,8 @@
<para>
The core <productname>PostgreSQL</productname> distribution
includes the <acronym>GIN</acronym> operator classes shown in
<xref linkend="gin-builtin-opclasses-table">.
(Some of the optional modules described in <xref linkend="contrib">
<xref linkend="gin-builtin-opclasses-table"/>.
(Some of the optional modules described in <xref linkend="contrib"/>
provide additional <acronym>GIN</acronym> operator classes.)
</para>
@ -127,7 +127,7 @@
Of the two operator classes for type <type>jsonb</type>, <literal>jsonb_ops</literal>
is the default. <literal>jsonb_path_ops</literal> supports fewer operators but
offers better performance for those operators.
See <xref linkend="json-indexing"> for details.
See <xref linkend="json-indexing"/> for details.
</para>
</sect1>
@ -182,7 +182,7 @@
<literal>query</literal> is the value on the right-hand side of an
indexable operator whose left-hand side is the indexed column.
<literal>n</literal> is the strategy number of the operator within the
operator class (see <xref linkend="xindex-strategies">).
operator class (see <xref linkend="xindex-strategies"/>).
Often, <function>extractQuery</function> will need
to consult <literal>n</literal> to determine the data type of
<literal>query</literal> and the method it should use to extract key values.
@ -406,7 +406,7 @@
provide the <function>comparePartial</function> method, and its
<function>extractQuery</function> method must set the <literal>pmatch</literal>
parameter when a partial-match query is encountered. See
<xref linkend="gin-partial-match"> for details.
<xref linkend="gin-partial-match"/> for details.
</para>
<para>
@ -466,7 +466,7 @@
When the table is vacuumed or autoanalyzed, or when
<function>gin_clean_pending_list</function> function is called, or if the
pending list becomes larger than
<xref linkend="guc-gin-pending-list-limit">, the entries are moved to the
<xref linkend="guc-gin-pending-list-limit"/>, the entries are moved to the
main <acronym>GIN</acronym> data structure using the same bulk insert
techniques used during initial index creation. This greatly improves
<acronym>GIN</acronym> index update speed, even counting the additional
@ -488,7 +488,7 @@
If consistent response time is more important than update speed,
use of pending entries can be disabled by turning off the
<literal>fastupdate</literal> storage parameter for a
<acronym>GIN</acronym> index. See <xref linkend="sql-createindex">
<acronym>GIN</acronym> index. See <xref linkend="sql-createindex"/>
for details.
</para>
</sect2>
@ -531,14 +531,14 @@
<para>
As of <productname>PostgreSQL</productname> 8.4, this advice is less
necessary since delayed indexing is used (see <xref
linkend="gin-fast-update"> for details). But for very large updates
linkend="gin-fast-update"/> for details). But for very large updates
it may still be best to drop and recreate the index.
</para>
</listitem>
</varlistentry>
<varlistentry>
<term><xref linkend="guc-maintenance-work-mem"></term>
<term><xref linkend="guc-maintenance-work-mem"/></term>
<listitem>
<para>
Build time for a <acronym>GIN</acronym> index is very sensitive to
@ -549,7 +549,7 @@
</varlistentry>
<varlistentry>
<term><xref linkend="guc-gin-pending-list-limit"></term>
<term><xref linkend="guc-gin-pending-list-limit"/></term>
<listitem>
<para>
During a series of insertions into an existing <acronym>GIN</acronym>
@ -574,7 +574,7 @@
</varlistentry>
<varlistentry>
<term><xref linkend="guc-gin-fuzzy-search-limit"></term>
<term><xref linkend="guc-gin-fuzzy-search-limit"/></term>
<listitem>
<para>
The primary goal of developing <acronym>GIN</acronym> indexes was
@ -631,7 +631,7 @@
<para>
The core <productname>PostgreSQL</productname> distribution
includes the <acronym>GIN</acronym> operator classes previously shown in
<xref linkend="gin-builtin-opclasses-table">.
<xref linkend="gin-builtin-opclasses-table"/>.
The following <filename>contrib</filename> modules also contain
<acronym>GIN</acronym> operator classes:

View File

@ -46,8 +46,8 @@
<para>
The core <productname>PostgreSQL</productname> distribution
includes the <acronym>GiST</acronym> operator classes shown in
<xref linkend="gist-builtin-opclasses-table">.
(Some of the optional modules described in <xref linkend="contrib">
<xref linkend="gist-builtin-opclasses-table"/>.
(Some of the optional modules described in <xref linkend="contrib"/>
provide additional <acronym>GiST</acronym> operator classes.)
</para>
@ -985,7 +985,7 @@ my_fetch(PG_FUNCTION_ARGS)
<para>
By default, a GiST index build switches to the buffering method when the
index size reaches <xref linkend="guc-effective-cache-size">. It can
index size reaches <xref linkend="guc-effective-cache-size"/>. It can
be manually turned on or off by the <literal>buffering</literal> parameter
to the CREATE INDEX command. The default behavior is good for most cases,
but turning buffering off might speed up the build somewhat if the input

View File

@ -100,7 +100,7 @@
Shared hardware functionality is common in network storage devices.
Using a network file system is also possible, though care must be
taken that the file system has full <acronym>POSIX</acronym> behavior (see <xref
linkend="creating-cluster-nfs">). One significant limitation of this
linkend="creating-cluster-nfs"/>). One significant limitation of this
method is that if the shared disk array fails or becomes corrupt, the
primary and standby servers are both nonfunctional. Another issue is
that the standby server should never access the shared storage while
@ -151,9 +151,9 @@ protocol to make nodes agree on a serializable transactional order.
</para>
<para>
A standby server can be implemented using file-based log shipping
(<xref linkend="warm-standby">) or streaming replication (see
<xref linkend="streaming-replication">), or a combination of both. For
information on hot standby, see <xref linkend="hot-standby">.
(<xref linkend="warm-standby"/>) or streaming replication (see
<xref linkend="streaming-replication"/>), or a combination of both. For
information on hot standby, see <xref linkend="hot-standby"/>.
</para>
</listitem>
</varlistentry>
@ -169,8 +169,8 @@ protocol to make nodes agree on a serializable transactional order.
individual tables to be replicated. Logical replication doesn't require
a particular server to be designated as a master or a replica but allows
data to flow in multiple directions. For more information on logical
replication, see <xref linkend="logical-replication">. Through the
logical decoding interface (<xref linkend="logicaldecoding">),
replication, see <xref linkend="logical-replication"/>. Through the
logical decoding interface (<xref linkend="logicaldecoding"/>),
third-party extensions can also provide similar functionality.
</para>
</listitem>
@ -224,8 +224,8 @@ protocol to make nodes agree on a serializable transactional order.
standby servers via master-standby replication, not by the replication
middleware. Care must also be taken that all
transactions either commit or abort on all servers, perhaps
using two-phase commit (<xref linkend="sql-prepare-transaction">
and <xref linkend="sql-commit-prepared">).
using two-phase commit (<xref linkend="sql-prepare-transaction"/>
and <xref linkend="sql-commit-prepared"/>).
<productname>Pgpool-II</productname> and <productname>Continuent Tungsten</productname>
are examples of this type of replication.
</para>
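The two-phase commit commands referenced here can be driven directly from SQL. A minimal sketch, assuming max_prepared_transactions is set to a nonzero value, with a placeholder table and an arbitrary transaction identifier:

    BEGIN;
    UPDATE accounts SET balance = balance - 100 WHERE id = 1;  -- placeholder statement
    PREPARE TRANSACTION 'tx1';  -- phase one: persist the transaction without committing it
    COMMIT PREPARED 'tx1';      -- phase two: commit it (or ROLLBACK PREPARED 'tx1' to abort)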
@ -272,8 +272,8 @@ protocol to make nodes agree on a serializable transactional order.
<para>
<productname>PostgreSQL</productname> does not offer this type of replication,
though <productname>PostgreSQL</productname> two-phase commit (<xref
linkend="sql-prepare-transaction"> and <xref
linkend="sql-commit-prepared">)
linkend="sql-prepare-transaction"/> and <xref
linkend="sql-commit-prepared"/>)
can be used to implement this in application code or middleware.
</para>
</listitem>
@ -295,7 +295,7 @@ protocol to make nodes agree on a serializable transactional order.
</variablelist>
<para>
<xref linkend="high-availability-matrix"> summarizes
<xref linkend="high-availability-matrix"/> summarizes
the capabilities of the various solutions listed above.
</para>
@ -522,7 +522,7 @@ protocol to make nodes agree on a serializable transactional order.
varies according to the transaction rate of the primary server.
Record-based log shipping is more granular and streams WAL changes
incrementally over a network connection (see <xref
linkend="streaming-replication">).
linkend="streaming-replication"/>).
</para>
<para>
@ -534,7 +534,7 @@ protocol to make nodes agree on a serializable transactional order.
<varname>archive_timeout</varname> parameter, which can be set as low
as a few seconds. However such a low setting will
substantially increase the bandwidth required for file shipping.
Streaming replication (see <xref linkend="streaming-replication">)
Streaming replication (see <xref linkend="streaming-replication"/>)
allows a much smaller window of data loss.
</para>
@ -547,7 +547,7 @@ protocol to make nodes agree on a serializable transactional order.
rollforward will take considerably longer, so that technique only
offers a solution for disaster recovery, not high availability.
A standby server can also be used for read-only queries, in which case
it is called a Hot Standby server. See <xref linkend="hot-standby"> for
it is called a Hot Standby server. See <xref linkend="hot-standby"/> for
more information.
</para>
@ -585,7 +585,7 @@ protocol to make nodes agree on a serializable transactional order.
associated with tablespaces will be passed across unmodified, so both
primary and standby servers must have the same mount paths for
tablespaces if that feature is used. Keep in mind that if
<xref linkend="sql-createtablespace">
<xref linkend="sql-createtablespace"/>
is executed on the primary, any new mount point needed for it must
be created on the primary and all standby servers before the command
is executed. Hardware need not be exactly the same, but experience shows
@ -618,7 +618,7 @@ protocol to make nodes agree on a serializable transactional order.
<para>
In standby mode, the server continuously applies WAL received from the
master server. The standby server can read WAL from a WAL archive
(see <xref linkend="restore-command">) or directly from the master
(see <xref linkend="restore-command"/>) or directly from the master
over a TCP connection (streaming replication). The standby server will
also attempt to restore any WAL found in the standby cluster's
<filename>pg_wal</filename> directory. That typically happens after a server
@ -657,7 +657,7 @@ protocol to make nodes agree on a serializable transactional order.
<para>
Set up continuous archiving on the primary to an archive directory
accessible from the standby, as described
in <xref linkend="continuous-archiving">. The archive location should be
in <xref linkend="continuous-archiving"/>. The archive location should be
accessible from the standby even when the master is down, i.e. it should
reside on the standby server itself or another trusted server, not on
the master server.
@ -676,7 +676,7 @@ protocol to make nodes agree on a serializable transactional order.
</para>
<para>
Take a base backup as described in <xref linkend="backup-base-backup">
Take a base backup as described in <xref linkend="backup-base-backup"/>
to bootstrap the standby server.
</para>
</sect2>
@ -686,7 +686,7 @@ protocol to make nodes agree on a serializable transactional order.
<para>
To set up the standby server, restore the base backup taken from primary
server (see <xref linkend="backup-pitr-recovery">). Create a recovery
server (see <xref linkend="backup-pitr-recovery"/>). Create a recovery
command file <filename>recovery.conf</filename> in the standby's cluster data
directory, and turn on <varname>standby_mode</varname>. Set
<varname>restore_command</varname> to a simple command to copy files from
@ -701,7 +701,7 @@ protocol to make nodes agree on a serializable transactional order.
Do not use pg_standby or similar tools with the built-in standby mode
described here. <varname>restore_command</varname> should return immediately
if the file does not exist; the server will retry the command again if
necessary. See <xref linkend="log-shipping-alternative">
necessary. See <xref linkend="log-shipping-alternative"/>
for using tools like pg_standby.
</para>
</note>
@ -724,11 +724,11 @@ protocol to make nodes agree on a serializable transactional order.
<para>
If you're using a WAL archive, its size can be minimized using the <xref
linkend="archive-cleanup-command"> parameter to remove files that are no
linkend="archive-cleanup-command"/> parameter to remove files that are no
longer required by the standby server.
The <application>pg_archivecleanup</application> utility is designed specifically to
be used with <varname>archive_cleanup_command</varname> in typical single-standby
configurations, see <xref linkend="pgarchivecleanup">.
configurations, see <xref linkend="pgarchivecleanup"/>.
Note however, that if you're using the archive for backup purposes, you
need to retain files needed to recover from at least the latest base
backup, even if they're no longer needed by the standby.
@ -768,7 +768,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r'
<para>
Streaming replication is asynchronous by default
(see <xref linkend="synchronous-replication">), in which case there is
(see <xref linkend="synchronous-replication"/>), in which case there is
a small delay between committing a transaction in the primary and the
changes becoming visible in the standby. This delay is however much
smaller than with file-based log shipping, typically under one second
@ -791,27 +791,27 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r'
<para>
To use streaming replication, set up a file-based log-shipping standby
server as described in <xref linkend="warm-standby">. The step that
server as described in <xref linkend="warm-standby"/>. The step that
turns a file-based log-shipping standby into streaming replication
standby is setting <varname>primary_conninfo</varname> setting in the
<filename>recovery.conf</filename> file to point to the primary server. Set
<xref linkend="guc-listen-addresses"> and authentication options
<xref linkend="guc-listen-addresses"/> and authentication options
(see <filename>pg_hba.conf</filename>) on the primary so that the standby server
can connect to the <literal>replication</literal> pseudo-database on the primary
server (see <xref linkend="streaming-replication-authentication">).
server (see <xref linkend="streaming-replication-authentication"/>).
</para>
<para>
On systems that support the keepalive socket option, setting
<xref linkend="guc-tcp-keepalives-idle">,
<xref linkend="guc-tcp-keepalives-interval"> and
<xref linkend="guc-tcp-keepalives-count"> helps the primary promptly
<xref linkend="guc-tcp-keepalives-idle"/>,
<xref linkend="guc-tcp-keepalives-interval"/> and
<xref linkend="guc-tcp-keepalives-count"/> helps the primary promptly
notice a broken connection.
</para>
<para>
Set the maximum number of concurrent connections from the standby servers
(see <xref linkend="guc-max-wal-senders"> for details).
(see <xref linkend="guc-max-wal-senders"/> for details).
</para>
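One way to apply the primary-side settings mentioned above, sketched with illustrative values only; both parameters take effect only after a server restart, and the pg_hba.conf entry for the replication pseudo-database still has to be added by hand:

    -- run on the primary, then restart it
    ALTER SYSTEM SET listen_addresses = '*';
    ALTER SYSTEM SET max_wal_senders = 10;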
<para>
@ -882,15 +882,15 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
standby. These locations can be retrieved using
<function>pg_current_wal_lsn</function> on the primary and
<function>pg_last_wal_receive_lsn</function> on the standby,
respectively (see <xref linkend="functions-admin-backup-table"> and
<xref linkend="functions-recovery-info-table"> for details).
respectively (see <xref linkend="functions-admin-backup-table"/> and
<xref linkend="functions-recovery-info-table"/> for details).
The last WAL receive location in the standby is also displayed in the
process status of the WAL receiver process, displayed using the
<command>ps</command> command (see <xref linkend="monitoring-ps"> for details).
<command>ps</command> command (see <xref linkend="monitoring-ps"/> for details).
</para>
<para>
You can retrieve a list of WAL sender processes via the
<xref linkend="pg-stat-replication-view"> view. Large differences between
<xref linkend="pg-stat-replication-view"/> view. Large differences between
<function>pg_current_wal_lsn</function> and the view's <literal>sent_lsn</literal> field
might indicate that the master server is under heavy load, while
differences between <literal>sent_lsn</literal> and
@ -899,7 +899,7 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
</para>
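The monitoring calls described in this passage can be combined roughly as follows; the exact output depends on the installation:

    -- on the primary: current write position and per-standby progress
    SELECT pg_current_wal_lsn();
    SELECT client_addr, state, sent_lsn, replay_lsn FROM pg_stat_replication;

    -- on the standby: last WAL location received and last location replayed
    SELECT pg_last_wal_receive_lsn(), pg_last_wal_replay_lsn();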
<para>
On a hot standby, the status of the WAL receiver process can be retrieved
via the <xref linkend="pg-stat-wal-receiver-view"> view. A large
via the <xref linkend="pg-stat-wal-receiver-view"/> view. A large
difference between <function>pg_last_wal_replay_lsn</function> and the
view's <literal>received_lsn</literal> indicates that WAL is being
received faster than it can be replayed.
@ -922,9 +922,9 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
</para>
<para>
In lieu of using replication slots, it is possible to prevent the removal
of old WAL segments using <xref linkend="guc-wal-keep-segments">, or by
of old WAL segments using <xref linkend="guc-wal-keep-segments"/>, or by
storing the segments in an archive using
<xref linkend="guc-archive-command">.
<xref linkend="guc-archive-command"/>.
However, these methods often result in retaining more WAL segments than
required, whereas replication slots retain only the number of segments
known to be needed. An advantage of these methods is that they bound
@ -932,8 +932,8 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
to do this using replication slots.
</para>
<para>
Similarly, <xref linkend="guc-hot-standby-feedback">
and <xref linkend="guc-vacuum-defer-cleanup-age"> provide protection against
Similarly, <xref linkend="guc-hot-standby-feedback"/>
and <xref linkend="guc-vacuum-defer-cleanup-age"/> provide protection against
relevant rows being removed by vacuum, but the former provides no
protection during any time period when the standby is not connected,
and the latter often needs to be set to a high value to provide adequate
@ -952,8 +952,8 @@ primary_conninfo = 'host=192.168.1.50 port=5432 user=foo password=foopass'
</para>
<para>
Slots can be created and dropped either via the streaming replication
protocol (see <xref linkend="protocol-replication">) or via SQL
functions (see <xref linkend="functions-replication">).
protocol (see <xref linkend="protocol-replication"/>) or via SQL
functions (see <xref linkend="functions-replication"/>).
</para>
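For example, the SQL-function route looks roughly like this; the slot name matches the node_a_slot example used a little further down in this file:

    -- on the primary: create a physical slot and inspect it
    SELECT pg_create_physical_replication_slot('node_a_slot');
    SELECT slot_name, slot_type, active FROM pg_replication_slots;

    -- drop it once the standby no longer needs it
    SELECT pg_drop_replication_slot('node_a_slot');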
</sect3>
<sect3 id="streaming-replication-slots-config">
@ -1017,7 +1017,7 @@ primary_slot_name = 'node_a_slot'
<para>
Cascading replication is currently asynchronous. Synchronous replication
(see <xref linkend="synchronous-replication">) settings have no effect on
(see <xref linkend="synchronous-replication"/>) settings have no effect on
cascading replication at present.
</para>
@ -1034,7 +1034,7 @@ primary_slot_name = 'node_a_slot'
<para>
To use cascading replication, set up the cascading standby so that it can
accept replication connections (that is, set
<xref linkend="guc-max-wal-senders"> and <xref linkend="guc-hot-standby">,
<xref linkend="guc-max-wal-senders"/> and <xref linkend="guc-hot-standby"/>,
and configure
<link linkend="auth-pg-hba-conf">host-based authentication</link>).
You will also need to set <varname>primary_conninfo</varname> in the downstream
@ -1109,11 +1109,11 @@ primary_slot_name = 'node_a_slot'
<para>
Once streaming replication has been configured, configuring synchronous
replication requires only one additional configuration step:
<xref linkend="guc-synchronous-standby-names"> must be set to
<xref linkend="guc-synchronous-standby-names"/> must be set to
a non-empty value. <varname>synchronous_commit</varname> must also be set to
<literal>on</literal>, but since this is the default value, typically no change is
required. (See <xref linkend="runtime-config-wal-settings"> and
<xref linkend="runtime-config-replication-master">.)
required. (See <xref linkend="runtime-config-wal-settings"/> and
<xref linkend="runtime-config-replication-master"/>.)
This configuration will cause each commit to wait for
confirmation that the standby has written the commit record to durable
storage.
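As a sketch, using the standby names from the example shown in the next hunk, the one required setting can be applied and picked up without a restart:

    ALTER SYSTEM SET synchronous_standby_names = 'ANY 2 (s1, s2, s3)';
    SELECT pg_reload_conf();  -- synchronous_standby_names only needs a reload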
@ -1451,7 +1451,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
and might stay down. To return to normal operation, a standby server
must be recreated,
either on the former primary system when it comes up, or on a third,
possibly new, system. The <xref linkend="app-pgrewind"> utility can be
possibly new, system. The <xref linkend="app-pgrewind"/> utility can be
used to speed up this process on large clusters.
Once complete, the primary and standby can be
considered to have switched roles. Some people choose to use a third
@ -1491,7 +1491,7 @@ synchronous_standby_names = 'ANY 2 (s1, s2, s3)'
This was the only option available in versions 8.4 and below. In this
setup, set <varname>standby_mode</varname> off, because you are implementing
the polling required for standby operation yourself. See the
<xref linkend="pgstandby"> module for a reference
<xref linkend="pgstandby"/> module for a reference
implementation of this.
</para>
@ -1551,7 +1551,7 @@ if (!triggered)
<para>
A working example of a waiting <varname>restore_command</varname> is provided
in the <xref linkend="pgstandby"> module. It
in the <xref linkend="pgstandby"/> module. It
should be used as a reference on how to correctly implement the logic
described above. It can also be extended as needed to support specific
configurations and environments.
@ -1592,17 +1592,17 @@ if (!triggered)
<para>
Set up continuous archiving from the primary to a WAL archive
directory on the standby server. Ensure that
<xref linkend="guc-archive-mode">,
<xref linkend="guc-archive-command"> and
<xref linkend="guc-archive-timeout">
<xref linkend="guc-archive-mode"/>,
<xref linkend="guc-archive-command"/> and
<xref linkend="guc-archive-timeout"/>
are set appropriately on the primary
(see <xref linkend="backup-archiving-wal">).
(see <xref linkend="backup-archiving-wal"/>).
</para>
</listitem>
<listitem>
<para>
Make a base backup of the primary server (see <xref
linkend="backup-base-backup">), and load this data onto the standby.
linkend="backup-base-backup"/>), and load this data onto the standby.
</para>
</listitem>
<listitem>
@ -1610,7 +1610,7 @@ if (!triggered)
Begin recovery on the standby server from the local WAL
archive, using a <filename>recovery.conf</filename> that specifies a
<varname>restore_command</varname> that waits as described
previously (see <xref linkend="backup-pitr-recovery">).
previously (see <xref linkend="backup-pitr-recovery"/>).
</para>
</listitem>
</orderedlist>
@ -1644,7 +1644,7 @@ if (!triggered)
<para>
An external program can call the <function>pg_walfile_name_offset()</function>
function (see <xref linkend="functions-admin">)
function (see <xref linkend="functions-admin"/>)
to find out the file name and the exact byte offset within it of
the current end of WAL. It can then access the WAL file directly
and copy the data from the last known end of WAL through the current end
@ -1663,7 +1663,7 @@ if (!triggered)
<para>
Starting with <productname>PostgreSQL</productname> version 9.0, you can use
streaming replication (see <xref linkend="streaming-replication">) to
streaming replication (see <xref linkend="streaming-replication"/>) to
achieve the same benefits with less effort.
</para>
</sect2>
@ -1697,7 +1697,7 @@ if (!triggered)
<title>User's Overview</title>
<para>
When the <xref linkend="guc-hot-standby"> parameter is set to true on a
When the <xref linkend="guc-hot-standby"/> parameter is set to true on a
standby server, it will begin accepting connections once the recovery has
brought the system to a consistent state. All such connections are
strictly read-only; not even temporary tables may be written.
@ -1713,7 +1713,7 @@ if (!triggered)
made by that transaction will be visible to any new snapshots taken on
the standby. Snapshots may be taken at the start of each query or at the
start of each transaction, depending on the current transaction isolation
level. For more details, see <xref linkend="transaction-iso">.
level. For more details, see <xref linkend="transaction-iso"/>.
</para>
<para>
@ -1891,7 +1891,7 @@ if (!triggered)
<para>
Users will be able to tell whether their session is read-only by
issuing <command>SHOW transaction_read_only</command>. In addition, a set of
functions (<xref linkend="functions-recovery-info-table">) allow users to
functions (<xref linkend="functions-recovery-info-table"/>) allow users to
access information about the standby server. These allow you to write
programs that are aware of the current state of the database. These
can be used to monitor the progress of recovery, or to allow you to
@ -1986,8 +1986,8 @@ if (!triggered)
When a conflicting query is short, it's typically desirable to allow it to
complete by delaying WAL application for a little bit; but a long delay in
WAL application is usually not desirable. So the cancel mechanism has
parameters, <xref linkend="guc-max-standby-archive-delay"> and <xref
linkend="guc-max-standby-streaming-delay">, that define the maximum
parameters, <xref linkend="guc-max-standby-archive-delay"/> and <xref
linkend="guc-max-standby-streaming-delay"/>, that define the maximum
allowed delay in WAL application. Conflicting queries will be canceled
once it has taken longer than the relevant delay setting to apply any
newly-received WAL data. There are two parameters so that different delay
@ -2082,7 +2082,7 @@ if (!triggered)
</para>
<para>
Another option is to increase <xref linkend="guc-vacuum-defer-cleanup-age">
Another option is to increase <xref linkend="guc-vacuum-defer-cleanup-age"/>
on the primary server, so that dead rows will not be cleaned up as quickly
as they normally would be. This will allow more time for queries to
execute before they are canceled on the standby, without having to set
@ -2189,8 +2189,8 @@ LOG: database system is ready to accept read only connections
<para>
It is important that the administrator select appropriate settings for
<xref linkend="guc-max-standby-archive-delay"> and <xref
linkend="guc-max-standby-streaming-delay">. The best choices vary
<xref linkend="guc-max-standby-archive-delay"/> and <xref
linkend="guc-max-standby-streaming-delay"/>. The best choices vary
depending on business priorities. For example if the server is primarily
tasked as a High Availability server, then you will want low delay
settings, perhaps even zero, though that is a very aggressive setting. If
@ -2382,23 +2382,23 @@ LOG: database system is ready to accept read only connections
<para>
Various parameters have been mentioned above in
<xref linkend="hot-standby-conflict"> and
<xref linkend="hot-standby-admin">.
<xref linkend="hot-standby-conflict"/> and
<xref linkend="hot-standby-admin"/>.
</para>
<para>
On the primary, parameters <xref linkend="guc-wal-level"> and
<xref linkend="guc-vacuum-defer-cleanup-age"> can be used.
<xref linkend="guc-max-standby-archive-delay"> and
<xref linkend="guc-max-standby-streaming-delay"> have no effect if set on
On the primary, parameters <xref linkend="guc-wal-level"/> and
<xref linkend="guc-vacuum-defer-cleanup-age"/> can be used.
<xref linkend="guc-max-standby-archive-delay"/> and
<xref linkend="guc-max-standby-streaming-delay"/> have no effect if set on
the primary.
</para>
<para>
On the standby, parameters <xref linkend="guc-hot-standby">,
<xref linkend="guc-max-standby-archive-delay"> and
<xref linkend="guc-max-standby-streaming-delay"> can be used.
<xref linkend="guc-vacuum-defer-cleanup-age"> has no effect
On the standby, parameters <xref linkend="guc-hot-standby"/>,
<xref linkend="guc-max-standby-archive-delay"/> and
<xref linkend="guc-max-standby-streaming-delay"/> can be used.
<xref linkend="guc-vacuum-defer-cleanup-age"/> has no effect
as long as the server remains in standby mode, though it will
become relevant if the standby becomes primary.
</para>
@ -2452,8 +2452,8 @@ LOG: database system is ready to accept read only connections
<listitem>
<para>
The Serializable transaction isolation level is not yet available in hot
standby. (See <xref linkend="xact-serializable"> and
<xref linkend="serializable-consistency"> for details.)
standby. (See <xref linkend="xact-serializable"/> and
<xref linkend="serializable-consistency"/> for details.)
An attempt to set a transaction to the serializable isolation level in
hot standby mode will generate an error.
</para>

View File

@ -31,12 +31,12 @@
Office (<acronym>ARO</acronym>), the National Science Foundation
(<acronym>NSF</acronym>), and ESL, Inc. The implementation of
<productname>POSTGRES</productname> began in 1986. The initial
concepts for the system were presented in <xref linkend="ston86">,
concepts for the system were presented in <xref linkend="ston86"/>,
and the definition of the initial data model appeared in <xref
linkend="rowe87">. The design of the rule system at that time was
described in <xref linkend="ston87a">. The rationale and
linkend="rowe87"/>. The design of the rule system at that time was
described in <xref linkend="ston87a"/>. The rationale and
architecture of the storage manager were detailed in <xref
linkend="ston87b">.
linkend="ston87b"/>.
</para>
<para>
@ -44,10 +44,10 @@
releases since then. The first <quote>demoware</quote> system
became operational in 1987 and was shown at the 1988
<acronym>ACM-SIGMOD</acronym> Conference. Version 1, described in
<xref linkend="ston90a">, was released to a few external users in
<xref linkend="ston90a"/>, was released to a few external users in
June 1989. In response to a critique of the first rule system
(<xref linkend="ston89">), the rule system was redesigned (<xref
linkend="ston90b">), and Version 2 was released in June 1990 with
(<xref linkend="ston89"/>), the rule system was redesigned (<xref
linkend="ston90b"/>), and Version 2 was released in June 1990 with
the new rule system. Version 3 appeared in 1991 and added support
for multiple storage managers, an improved query executor, and a
rewritten rule system. For the most part, subsequent releases
@ -216,7 +216,7 @@
<para>
Details about what has happened in <productname>PostgreSQL</productname> since
then can be found in <xref linkend="release">.
then can be found in <xref linkend="release"/>.
</para>
</sect2>
</sect1>

View File

@ -70,7 +70,7 @@ key =&gt; NULL
constant, then any single-quote characters and (depending on the setting of
the <varname>standard_conforming_strings</varname> configuration parameter)
backslash characters need to be escaped correctly. See
<xref linkend="sql-syntax-strings"> for more on the handling of string
<xref linkend="sql-syntax-strings"/> for more on the handling of string
constants.
</para>
</note>
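A short illustration of the quoting rules this note describes, assuming the module has been installed with CREATE EXTENSION hstore; the keys and values are arbitrary:

    -- the embedded single quote is doubled at the SQL string level
    SELECT 'a=>1, note=>"it''s quoted"'::hstore -> 'note';  -- returns: it's quoted
    SELECT 'key => NULL'::hstore -> 'key';                  -- hstore NULL comes back as SQL NULL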
@ -87,8 +87,8 @@ key =&gt; NULL
<para>
The operators provided by the <literal>hstore</literal> module are
shown in <xref linkend="hstore-op-table">, the functions
in <xref linkend="hstore-func-table">.
shown in <xref linkend="hstore-op-table"/>, the functions
in <xref linkend="hstore-func-table"/>.
</para>
<table id="hstore-op-table">
@ -629,7 +629,7 @@ ALTER TABLE tablename ALTER hstorecol TYPE hstore USING hstorecol || '';
extensions for PL/Python are
called <literal>hstore_plpythonu</literal>, <literal>hstore_plpython2u</literal>,
and <literal>hstore_plpython3u</literal>
(see <xref linkend="plpython-python23"> for the PL/Python naming
(see <xref linkend="plpython-python23"/> for the PL/Python naming
convention). If you use them, <type>hstore</type> values are mapped to
Python dictionaries.
</para>

View File

@ -22,7 +22,7 @@
pages so that they can use the regular storage manager and buffer manager
to access the index contents. (All the existing index access methods
furthermore use the standard page layout described in <xref
linkend="storage-page-layout">, and most use the same format for index
linkend="storage-page-layout"/>, and most use the same format for index
tuple headers; but these decisions are not forced on an access method.)
</para>
@ -31,7 +31,7 @@
<firstterm>tuple identifiers</firstterm>, or <acronym>TIDs</acronym>, of row versions
(tuples) in the index's parent table. A TID consists of a
block number and an item number within that block (see <xref
linkend="storage-page-layout">). This is sufficient
linkend="storage-page-layout"/>). This is sufficient
information to fetch a particular row version from the table.
Indexes are not directly aware that under MVCC, there might be multiple
extant versions of the same logical row; to an index, each tuple is
@ -52,8 +52,8 @@
system catalog. The <structname>pg_am</structname> entry
specifies a name and a <firstterm>handler function</firstterm> for the access
method. These entries can be created and deleted using the
<xref linkend="sql-create-access-method"> and
<xref linkend="sql-drop-access-method"> SQL commands.
<xref linkend="sql-create-access-method"/> and
<xref linkend="sql-drop-access-method"/> SQL commands.
</para>
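For orientation, registering an access method through those commands looks roughly like this; the method and handler names are hypothetical, and the handler itself must already exist as a C function as described in the rest of this chapter:

    CREATE ACCESS METHOD myam TYPE INDEX HANDLER myam_handler;
    -- and to remove it again:
    DROP ACCESS METHOD myam;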
<para>
@ -71,7 +71,7 @@
functions for the access method, which do all of the real work to access
indexes. These support functions are plain C functions and are not
visible or callable at the SQL level. The support functions are described
in <xref linkend="index-functions">.
in <xref linkend="index-functions"/>.
</para>
<para>
@ -153,7 +153,7 @@ typedef struct IndexAmRoutine
These entries allow the planner
to determine what kinds of query qualifications can be used with
indexes of this access method. Operator families and classes are described
in <xref linkend="xindex">, which is prerequisite material for reading
in <xref linkend="xindex"/>, which is prerequisite material for reading
this chapter.
</para>
@ -177,7 +177,7 @@ typedef struct IndexAmRoutine
<para>
Some of the flag fields of <structname>IndexAmRoutine</structname> have nonobvious
implications. The requirements of <structfield>amcanunique</structfield>
are discussed in <xref linkend="index-unique-checks">.
are discussed in <xref linkend="index-unique-checks"/>.
The <structfield>amcanmulticol</structfield> flag asserts that the
access method supports multicolumn indexes, while
<structfield>amoptionalkey</structfield> asserts that it allows scans
@ -271,7 +271,7 @@ aminsert (Relation indexRelation,
<structfield>amcanunique</structfield> flag is true) then
<literal>checkUnique</literal> indicates the type of uniqueness check to
perform. This varies depending on whether the unique constraint is
deferrable; see <xref linkend="index-unique-checks"> for details.
deferrable; see <xref linkend="index-unique-checks"/> for details.
Normally the access method only needs the <literal>heapRelation</literal>
parameter when performing uniqueness checking (since then it will have to
look into the heap to verify tuple liveness).
@ -386,7 +386,7 @@ amcostestimate (PlannerInfo *root,
double *indexCorrelation);
</programlisting>
Estimate the costs of an index scan. This function is described fully
in <xref linkend="index-cost-estimation">, below.
in <xref linkend="index-cost-estimation"/>, below.
</para>
<para>
@ -480,7 +480,7 @@ amvalidate (Oid opclassoid);
The purpose of an index, of course, is to support scans for tuples matching
an indexable <literal>WHERE</literal> condition, often called a
<firstterm>qualifier</firstterm> or <firstterm>scan key</firstterm>. The semantics of
index scanning are described more fully in <xref linkend="index-scanning">,
index scanning are described more fully in <xref linkend="index-scanning"/>,
below. An index access method can support <quote>plain</quote> index scans,
<quote>bitmap</quote> index scans, or both. The scan-related functions that an
index access method must or may provide are:
@ -594,7 +594,7 @@ amgetbitmap (IndexScanDesc scan,
<function>amgetbitmap</function> and
<function>amgettuple</function> cannot be used in the same index scan; there
are other restrictions too when using <function>amgetbitmap</function>, as explained
in <xref linkend="index-scanning">.
in <xref linkend="index-scanning"/>.
</para>
<para>
@ -852,7 +852,7 @@ amparallelrescan (IndexScanDesc scan);
index tuples.
Finally, <function>amgetbitmap</function>
does not guarantee any locking of the returned tuples, with implications
spelled out in <xref linkend="index-locking">.
spelled out in <xref linkend="index-locking"/>.
</para>
<para>
@ -901,7 +901,7 @@ amparallelrescan (IndexScanDesc scan);
A new heap entry is made before making its index entries. (Therefore
a concurrent index scan is likely to fail to see the heap entry.
This is okay because the index reader would be uninterested in an
uncommitted row anyway. But see <xref linkend="index-unique-checks">.)
uncommitted row anyway. But see <xref linkend="index-unique-checks"/>.)
</para>
</listitem>
<listitem>

View File

@ -77,7 +77,7 @@ CREATE INDEX test1_id_index ON test1 (id);
than a sequential table scan. But you might have to run the
<command>ANALYZE</command> command regularly to update
statistics to allow the query planner to make educated decisions.
See <xref linkend="performance-tips"> for information about
See <xref linkend="performance-tips"/> for information about
how to find out whether an index is used and when and why the
planner might choose <emphasis>not</emphasis> to use an index.
</para>
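A quick way to check the planner's choice once statistics are current, reusing the table and index from this example (the constant is arbitrary):

    ANALYZE test1;                              -- refresh planner statistics
    EXPLAIN SELECT * FROM test1 WHERE id = 42;  -- shows whether test1_id_index is chosen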
@ -99,7 +99,7 @@ CREATE INDEX test1_id_index ON test1 (id);
It is possible to allow writes to occur in parallel with index
creation, but there are several caveats to be aware of &mdash;
for more information see <xref linkend="sql-createindex-concurrently"
endterm="sql-createindex-concurrently-title">.
endterm="sql-createindex-concurrently-title"/>.
</para>
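The concurrent form referred to here is the same statement with one extra keyword; it avoids blocking writes but cannot run inside a transaction block:

    CREATE INDEX CONCURRENTLY test1_id_index ON test1 (id);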
<para>
@ -161,7 +161,7 @@ CREATE INDEX test1_id_index ON test1 (id);
<literal>col LIKE '%bar'</literal>. However, if your database does not
use the C locale you will need to create the index with a special
operator class to support indexing of pattern-matching queries; see
<xref linkend="indexes-opclass"> below. It is also possible to use
<xref linkend="indexes-opclass"/> below. It is also possible to use
B-tree indexes for <literal>ILIKE</literal> and
<literal>~*</literal>, but only if the pattern starts with
non-alphabetic characters, i.e., characters that are not affected by
@ -226,13 +226,13 @@ CREATE INDEX <replaceable>name</replaceable> ON <replaceable>table</replaceable>
<member><literal>&amp;&amp;</literal></member>
</simplelist>
(See <xref linkend="functions-geometry"> for the meaning of
(See <xref linkend="functions-geometry"/> for the meaning of
these operators.)
The GiST operator classes included in the standard distribution are
documented in <xref linkend="gist-builtin-opclasses-table">.
documented in <xref linkend="gist-builtin-opclasses-table"/>.
Many other GiST operator
classes are available in the <literal>contrib</literal> collection or as separate
projects. For more information see <xref linkend="gist">.
projects. For more information see <xref linkend="gist"/>.
</para>
<para>
@ -244,7 +244,7 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10;
</programlisting>
which finds the ten places closest to a given target point. The ability
to do this is again dependent on the particular operator class being used.
In <xref linkend="gist-builtin-opclasses-table">, operators that can be
In <xref linkend="gist-builtin-opclasses-table"/>, operators that can be
used in this way are listed in the column <quote>Ordering Operators</quote>.
</para>
@ -274,11 +274,11 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10;
<member><literal>&gt;^</literal></member>
</simplelist>
(See <xref linkend="functions-geometry"> for the meaning of
(See <xref linkend="functions-geometry"/> for the meaning of
these operators.)
The SP-GiST operator classes included in the standard distribution are
documented in <xref linkend="spgist-builtin-opclasses-table">.
For more information see <xref linkend="spgist">.
documented in <xref linkend="spgist-builtin-opclasses-table"/>.
For more information see <xref linkend="spgist"/>.
</para>
<para>
@ -313,13 +313,13 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10;
<member><literal>&amp;&amp;</literal></member>
</simplelist>
(See <xref linkend="functions-array"> for the meaning of
(See <xref linkend="functions-array"/> for the meaning of
these operators.)
The GIN operator classes included in the standard distribution are
documented in <xref linkend="gin-builtin-opclasses-table">.
documented in <xref linkend="gin-builtin-opclasses-table"/>.
Many other GIN operator
classes are available in the <literal>contrib</literal> collection or as separate
projects. For more information see <xref linkend="gin">.
projects. For more information see <xref linkend="gin"/>.
</para>
<para>
@ -351,8 +351,8 @@ SELECT * FROM places ORDER BY location <-> point '(101,456)' LIMIT 10;
</simplelist>
The BRIN operator classes included in the standard distribution are
documented in <xref linkend="brin-builtin-opclasses-table">.
For more information see <xref linkend="brin">.
documented in <xref linkend="brin-builtin-opclasses-table"/>.
For more information see <xref linkend="brin"/>.
</para>
</sect1>
@ -454,8 +454,8 @@ CREATE INDEX test2_mm_idx ON test2 (major, minor);
an index on a single column is sufficient and saves space and time.
Indexes with more than three columns are unlikely to be helpful
unless the usage of the table is extremely stylized. See also
<xref linkend="indexes-bitmap-scans"> and
<xref linkend="indexes-index-only-scans"> for some discussion of the
<xref linkend="indexes-bitmap-scans"/> and
<xref linkend="indexes-index-only-scans"/> for some discussion of the
merits of different index configurations.
</para>
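For example, the two-column index from this hunk can serve queries that constrain both columns, or only the leading one; the constants are placeholders:

    SELECT * FROM test2 WHERE major = 5 AND minor = 10;  -- can use test2_mm_idx
    SELECT * FROM test2 WHERE major = 5;                 -- can still use it, via the leading column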
</sect1>
@ -609,7 +609,7 @@ CREATE INDEX test3_desc_index ON test3 (id DESC NULLS LAST);
process the queries that use both columns. You could also create a
multicolumn index on <literal>(x, y)</literal>. This index would typically be
more efficient than index combination for queries involving both
columns, but as discussed in <xref linkend="indexes-multicolumn">, it
columns, but as discussed in <xref linkend="indexes-multicolumn"/>, it
would be almost useless for queries involving only <literal>y</literal>, so it
should not be the only index. A combination of the multicolumn index
and a separate index on <literal>y</literal> would serve reasonably well. For
@ -762,7 +762,7 @@ CREATE INDEX people_names ON people ((first_name || ' ' || last_name));
index at all. This reduces the size of the index, which will speed
up those queries that do use the index. It will also speed up many table
update operations because the index does not need to be
updated in all cases. <xref linkend="indexes-partial-ex1"> shows a
updated in all cases. <xref linkend="indexes-partial-ex1"/> shows a
possible application of this idea.
</para>
@ -827,7 +827,7 @@ WHERE client_ip = inet '192.168.100.23';
Another possible use for a partial index is to exclude values from the
index that the
typical query workload is not interested in; this is shown in <xref
linkend="indexes-partial-ex2">. This results in the same
linkend="indexes-partial-ex2"/>. This results in the same
advantages as listed above, but it prevents the
<quote>uninteresting</quote> values from being accessed via that
index, even if an index scan might be profitable in that
@ -878,7 +878,7 @@ SELECT * FROM orders WHERE order_nr = 3501;
</example>
<para>
<xref linkend="indexes-partial-ex2"> also illustrates that the
<xref linkend="indexes-partial-ex2"/> also illustrates that the
indexed column and the column used in the predicate do not need to
match. <productname>PostgreSQL</productname> supports partial
indexes with arbitrary predicates, so long as only columns of the
@ -909,7 +909,7 @@ SELECT * FROM orders WHERE order_nr = 3501;
A third possible use for partial indexes does not require the
index to be used in queries at all. The idea here is to create
a unique index over a subset of a table, as in <xref
linkend="indexes-partial-ex3">. This enforces uniqueness
linkend="indexes-partial-ex3"/>. This enforces uniqueness
among the rows that satisfy the index predicate, without constraining
those that do not.
</para>
@ -962,8 +962,8 @@ CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target)
<para>
More information about partial indexes can be found in <xref
linkend="ston89b">, <xref linkend="olson93">, and <xref
linkend="seshadri95">.
linkend="ston89b"/>, <xref linkend="olson93"/>, and <xref
linkend="seshadri95"/>.
</para>
</sect1>
@ -1157,7 +1157,7 @@ CREATE INDEX test1c_content_y_index ON test1c (content COLLATE "y");
the index, the table rows they reference might be anywhere in the heap.
The heap-access portion of an index scan thus involves a lot of random
access into the heap, which can be slow, particularly on traditional
rotating media. (As described in <xref linkend="indexes-bitmap-scans">,
rotating media. (As described in <xref linkend="indexes-bitmap-scans"/>,
bitmap scans try to alleviate this cost by doing the heap accesses in
sorted order, but that only goes so far.)
</para>
@ -1212,7 +1212,7 @@ SELECT x FROM tab WHERE x = 'key' AND z &lt; 42;
is physically possible. But there is an additional requirement for any
table scan in <productname>PostgreSQL</productname>: it must verify that each
retrieved row be <quote>visible</quote> to the query's MVCC snapshot, as
discussed in <xref linkend="mvcc">. Visibility information is not stored
discussed in <xref linkend="mvcc"/>. Visibility information is not stored
in index entries, only in heap entries; so at first glance it would seem
that every row retrieval would require a heap access anyway. And this is
indeed the case, if the table row has been modified recently. However,
@ -1289,7 +1289,7 @@ SELECT f(x) FROM tab WHERE f(x) &lt; 1;
<para>
Partial indexes also have interesting interactions with index-only scans.
Consider the partial index shown in <xref linkend="indexes-partial-ex3">:
Consider the partial index shown in <xref linkend="indexes-partial-ex3"/>:
<programlisting>
CREATE UNIQUE INDEX tests_success_constraint ON tests (subject, target)
WHERE success;
@ -1325,11 +1325,11 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success;
maintenance or tuning, it is still important to check
which indexes are actually used by the real-life query workload.
Examining index usage for an individual query is done with the
<xref linkend="sql-explain">
<xref linkend="sql-explain"/>
command; its application for this purpose is
illustrated in <xref linkend="using-explain">.
illustrated in <xref linkend="using-explain"/>.
It is also possible to gather overall statistics about index usage
in a running server, as described in <xref linkend="monitoring-stats">.
in a running server, as described in <xref linkend="monitoring-stats"/>.
</para>
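Concretely, both kinds of check mentioned here can be done along these lines, reusing the table and index names from earlier in this file:

    -- per query: does the chosen plan reference the index?
    EXPLAIN ANALYZE SELECT * FROM test1 WHERE id = 42;

    -- server-wide: cumulative scan counts per index
    SELECT indexrelname, idx_scan FROM pg_stat_user_indexes WHERE relname = 'test1';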
<para>
@ -1343,7 +1343,7 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success;
<itemizedlist>
<listitem>
<para>
Always run <xref linkend="sql-analyze">
Always run <xref linkend="sql-analyze"/>
first. This command
collects statistics about the distribution of the values in the
table. This information is required to estimate the number of rows
@ -1353,8 +1353,8 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success;
almost certain to be inaccurate. Examining an application's
index usage without having run <command>ANALYZE</command> is
therefore a lost cause.
See <xref linkend="vacuum-for-statistics">
and <xref linkend="autovacuum"> for more information.
See <xref linkend="vacuum-for-statistics"/>
and <xref linkend="autovacuum"/> for more information.
</para>
</listitem>
@ -1386,7 +1386,7 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success;
<para>
When indexes are not used, it can be useful for testing to force
their use. There are run-time parameters that can turn off
various plan types (see <xref linkend="runtime-config-query-enable">).
various plan types (see <xref linkend="runtime-config-query-enable"/>).
For instance, turning off sequential scans
(<varname>enable_seqscan</varname>) and nested-loop joins
(<varname>enable_nestloop</varname>), which are the most basic plans,
@ -1417,11 +1417,11 @@ SELECT target FROM tests WHERE subject = 'some-subject' AND success;
per-row costs of each plan node times the selectivity estimate of
the plan node. The costs estimated for the plan nodes can be adjusted
via run-time parameters (described in <xref
linkend="runtime-config-query-constants">).
linkend="runtime-config-query-constants"/>).
An inaccurate selectivity estimate is due to
insufficient statistics. It might be possible to improve this by
tuning the statistics-gathering parameters (see
<xref linkend="sql-altertable">).
<xref linkend="sql-altertable"/>).
</para>
<para>

View File

@ -579,7 +579,7 @@
</table>
<para>
See also under <xref linkend="infoschema-columns">, a similarly
See also under <xref linkend="infoschema-columns"/>, a similarly
structured view, for further information on some of the columns.
</para>
</sect1>
@ -776,7 +776,7 @@
<entry><literal>sql_identifier</literal></entry>
<entry>
The <quote>specific name</quote> of the function. See <xref
linkend="infoschema-routines"> for more information.
linkend="infoschema-routines"/> for more information.
</entry>
</row>
</tbody>
@ -895,7 +895,7 @@
identifies which character set the available collations are
applicable to. In PostgreSQL, there is only one character set per
database (see explanation
in <xref linkend="infoschema-character-sets">), so this view does
in <xref linkend="infoschema-character-sets"/>), so this view does
not provide much useful information.
</para>
@ -1178,7 +1178,7 @@
that use data types owned by a currently enabled role. Note that in
<productname>PostgreSQL</productname>, built-in data types behave
like user-defined types, so they are included here as well. See
also <xref linkend="infoschema-columns"> for details.
also <xref linkend="infoschema-columns"/> for details.
</para>
<table>
@ -3134,7 +3134,7 @@ ORDER BY c.ordinal_position;
<entry><type>sql_identifier</type></entry>
<entry>
The <quote>specific name</quote> of the function. See <xref
linkend="infoschema-routines"> for more information.
linkend="infoschema-routines"/> for more information.
</entry>
</row>
@ -3594,7 +3594,7 @@ ORDER BY c.ordinal_position;
<entry><type>sql_identifier</type></entry>
<entry>
The <quote>specific name</quote> of the function. See <xref
linkend="infoschema-routines"> for more information.
linkend="infoschema-routines"/> for more information.
</entry>
</row>
@ -3930,7 +3930,7 @@ ORDER BY c.ordinal_position;
<entry><type>sql_identifier</type></entry>
<entry>
The <quote>specific name</quote> of the function. See <xref
linkend="infoschema-routines"> for more information.
linkend="infoschema-routines"/> for more information.
</entry>
</row>
@ -4762,7 +4762,7 @@ ORDER BY c.ordinal_position;
The table <literal>sql_features</literal> contains information
about which formal features defined in the SQL standard are
supported by <productname>PostgreSQL</productname>. This is the
same information that is presented in <xref linkend="features">.
same information that is presented in <xref linkend="features"/>.
There you can also find some additional background information.
</para>
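Since it is an ordinary information_schema table, it can simply be queried; a minimal example (the counts vary by server version):

    SELECT is_supported, count(*)
    FROM information_schema.sql_features
    GROUP BY is_supported;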
@ -4998,7 +4998,7 @@ ORDER BY c.ordinal_position;
The table <literal>sql_packages</literal> contains information
about which feature packages defined in the SQL standard are
supported by <productname>PostgreSQL</productname>. Refer to <xref
linkend="features"> for background information on feature packages.
linkend="features"/> for background information on feature packages.
</para>
<table>
@ -5586,7 +5586,7 @@ ORDER BY c.ordinal_position;
<entry><literal>sql_identifier</literal></entry>
<entry>
The <quote>specific name</quote> of the function. See <xref
linkend="infoschema-routines"> for more information.
linkend="infoschema-routines"/> for more information.
</entry>
</row>
@ -5891,9 +5891,9 @@ ORDER BY c.ordinal_position;
<literal>USAGE</literal> privileges granted on user-defined types to a
currently enabled role or by a currently enabled role. There is one row for
each combination of type, grantor, and grantee. This view shows only
composite types (see under <xref linkend="infoschema-user-defined-types">
composite types (see under <xref linkend="infoschema-user-defined-types"/>
for why); see
<xref linkend="infoschema-usage-privileges"> for domain privileges.
<xref linkend="infoschema-usage-privileges"/> for domain privileges.
</para>
<table>
@ -6068,7 +6068,7 @@ ORDER BY c.ordinal_position;
differentiate between these. Other user-defined types such as base
types and enums, which are <productname>PostgreSQL</productname>
extensions, are not shown here. For domains,
see <xref linkend="infoschema-domains"> instead.
see <xref linkend="infoschema-domains"/> instead.
</para>
<table>
@ -6522,7 +6522,7 @@ ORDER BY c.ordinal_position;
<entry><literal>sql_identifier</literal></entry>
<entry>
The <quote>specific name</quote> of the function. See <xref
linkend="infoschema-routines"> for more information.
linkend="infoschema-routines"/> for more information.
</entry>
</row>
</tbody>

View File

@ -37,8 +37,8 @@
<para>
Building using <productname>MinGW</productname> or
<productname>Cygwin</productname> uses the normal build system, see
<xref linkend="installation"> and the specific notes in
<xref linkend="installation-notes-mingw"> and <xref linkend="installation-notes-cygwin">.
<xref linkend="installation"/> and the specific notes in
<xref linkend="installation-notes-mingw"/> and <xref linkend="installation-notes-cygwin"/>.
To produce native 64 bit binaries in these environments, use the tools from
<productname>MinGW-w64</productname>. These tools can also be used to
cross-compile for 32 bit and 64 bit <productname>Windows</productname>
@ -457,7 +457,7 @@ $ENV{CONFIG}="Debug";
</screen>
For more information about the regression tests, see
<xref linkend="regress">.
<xref linkend="regress"/>.
</para>
<para>

View File

@ -54,7 +54,7 @@ su - postgres
In general, a modern Unix-compatible platform should be able to run
<productname>PostgreSQL</productname>.
The platforms that had received specific testing at the
time of release are listed in <xref linkend="supported-platforms">
time of release are listed in <xref linkend="supported-platforms"/>
below. In the <filename>doc</filename> subdirectory of the distribution
there are several platform-specific <acronym>FAQ</acronym> documents you
might wish to consult if you are having trouble.
@ -193,7 +193,7 @@ su - postgres
required version is <productname>Python</productname> 2.4.
<productname>Python 3</productname> is supported if it's
version 3.1 or later; but see
<xref linkend="plpython-python23">
<xref linkend="plpython-python23"/>
when using Python 3.
</para>
@ -262,7 +262,7 @@ su - postgres
<para>
To build the <productname>PostgreSQL</productname> documentation,
there is a separate set of requirements; see
<xref linkend="docguide-toolsets">.
<xref linkend="docguide-toolsets"/>.
</para>
</listitem>
</itemizedlist>
@ -358,7 +358,7 @@ su - postgres
<para>
You can also get the source directly from the version control repository, see
<xref linkend="sourcerepo">.
<xref linkend="sourcerepo"/>.
</para>
</sect1>
@ -835,8 +835,8 @@ su - postgres
<para>
Build with <acronym>LDAP</acronym><indexterm><primary>LDAP</primary></indexterm>
support for authentication and connection parameter lookup (see
<phrase id="install-ldap-links"><xref linkend="libpq-ldap"> and
<xref linkend="auth-ldap"></phrase> for more information). On Unix,
<phrase id="install-ldap-links"><xref linkend="libpq-ldap"/> and
<xref linkend="auth-ldap"/></phrase> for more information). On Unix,
this requires the <productname>OpenLDAP</productname> package to be
installed. On Windows, the default <productname>WinLDAP</productname>
library is used. <filename>configure</filename> will check for the required
@ -855,7 +855,7 @@ su - postgres
for <application>systemd</application><indexterm><primary>systemd</primary></indexterm>
service notifications. This improves integration if the server binary
is started under <application>systemd</application> but has no impact
otherwise<phrase condition="standalone-ignore">; see <xref linkend="server-start"> for more
otherwise<phrase condition="standalone-ignore">; see <xref linkend="server-start"/> for more
information</phrase>. <application>libsystemd</application> and the
associated header files need to be installed to be able to use this
option.
@ -901,7 +901,7 @@ su - postgres
<term><option>--with-uuid=<replaceable>LIBRARY</replaceable></option></term>
<listitem>
<para>
Build the <xref linkend="uuid-ossp"> module
Build the <xref linkend="uuid-ossp"/> module
(which provides functions to generate UUIDs), using the specified
UUID library.<indexterm><primary>UUID</primary></indexterm>
<replaceable>LIBRARY</replaceable> must be one of:
@ -968,7 +968,7 @@ su - postgres
<listitem>
<para>
Use libxslt when building the
<xref linkend="xml2">
<xref linkend="xml2"/>
module. <application>xml2</application> relies on this library
to perform XSL transformations of XML.
</para>
@ -1084,7 +1084,7 @@ su - postgres
has no support for strong random numbers on the platform.
A source of random numbers is needed for some authentication
protocols, as well as some routines in the
<xref linkend="pgcrypto">
<xref linkend="pgcrypto"/>
module. <option>--disable-strong-random</option> disables functionality that
requires cryptographically strong random numbers, and substitutes
a weak pseudo-random-number-generator for the generation of
@ -1188,7 +1188,7 @@ su - postgres
code coverage testing instrumentation. When run, they
generate files in the build directory with code coverage
metrics.
<phrase condition="standalone-ignore">See <xref linkend="regress-coverage">
<phrase condition="standalone-ignore">See <xref linkend="regress-coverage"/>
for more information.</phrase> This option is for use only with GCC
and when doing development work.
</para>
@ -1249,7 +1249,7 @@ su - postgres
</indexterm>
Compiles <productname>PostgreSQL</productname> with support for the
dynamic tracing tool DTrace.
<phrase condition="standalone-ignore">See <xref linkend="dynamic-trace">
<phrase condition="standalone-ignore">See <xref linkend="dynamic-trace"/>
for more information.</phrase>
</para>
@ -1285,7 +1285,7 @@ su - postgres
<para>
Enable tests using the Perl TAP tools. This requires a Perl
installation and the Perl module <literal>IPC::Run</literal>.
<phrase condition="standalone-ignore">See <xref linkend="regress-tap"> for more information.</phrase>
<phrase condition="standalone-ignore">See <xref linkend="regress-tap"/> for more information.</phrase>
</para>
</listitem>
</varlistentry>
@ -1442,7 +1442,7 @@ su - postgres
whether Python 2 or 3 is specified here (or otherwise
implicitly chosen) determines which variant of the PL/Python
language becomes available. See
<xref linkend="plpython-python23">
<xref linkend="plpython-python23"/>
for more information.
</para>
</listitem>
@ -1569,7 +1569,7 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install.
<userinput>make check</userinput>
</screen>
(This won't work as root; do it as an unprivileged user.)
See <xref linkend="regress"> for
See <xref linkend="regress"/> for
detailed information about interpreting the test results. You can
repeat this test at any later time by issuing the same command.
</para>
@ -1581,7 +1581,7 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install.
<note>
<para>
If you are upgrading an existing system be sure to read
<xref linkend="upgrading">,
<xref linkend="upgrading"/>,
which has instructions about upgrading a
cluster.
</para>
@ -1593,7 +1593,7 @@ PostgreSQL, contrib, and documentation successfully made. Ready to install.
<userinput>make install</userinput>
</screen>
This will install files into the directories that were specified
in <xref linkend="configure">. Make sure that you have appropriate
in <xref linkend="configure"/>. Make sure that you have appropriate
permissions to write into that area. Normally you need to do this
step as root. Alternatively, you can create the target
directories in advance and arrange for appropriate permissions to
@ -1727,7 +1727,7 @@ export LD_LIBRARY_PATH
setenv LD_LIBRARY_PATH /usr/local/pgsql/lib
</programlisting>
Replace <literal>/usr/local/pgsql/lib</literal> with whatever you set
<option><literal>--libdir</literal></option> to in <xref linkend="configure">.
<option><literal>--libdir</literal></option> to in <xref linkend="configure"/>.
You should put these commands into a shell start-up file such as
<filename>/etc/profile</filename> or <filename>~/.bash_profile</filename>. Some
good information about the caveats associated with this method can
@ -1793,7 +1793,7 @@ libpq.so.2.1: cannot open shared object file: No such file or directory
If you installed into <filename>/usr/local/pgsql</filename> or some other
location that is not searched for programs by default, you should
add <filename>/usr/local/pgsql/bin</filename> (or whatever you set
<option><literal>--bindir</literal></option> to in <xref linkend="configure">)
<option><literal>--bindir</literal></option> to in <xref linkend="configure"/>)
into your <envar>PATH</envar>. Strictly speaking, this is not
necessary, but it will make the use of <productname>PostgreSQL</productname>
much more convenient.
@ -1873,7 +1873,7 @@ export MANPATH
Other Unix-like systems may also work but are not currently
being tested. In most cases, all CPU architectures supported by
a given operating system will work. Look in
<xref linkend="installation-platform-notes"> below to see if
<xref linkend="installation-platform-notes"/> below to see if
there is information
specific to your operating system, particularly if using an older system.
</para>
@ -1895,8 +1895,8 @@ export MANPATH
This section documents additional platform-specific issues
regarding the installation and setup of PostgreSQL. Be sure to
read the installation instructions, and in
particular <xref linkend="install-requirements"> as well. Also,
check <xref linkend="regress"> regarding the
particular <xref linkend="install-requirements"/> as well. Also,
check <xref linkend="regress"/> regarding the
interpretation of regression test results.
</para>
@ -2247,7 +2247,7 @@ ERROR: could not load library "/opt/dbs/pgsql/lib/plperl.so": Bad address
<para>
PostgreSQL can be built using Cygwin, a Linux-like environment for
Windows, but that method is inferior to the native Windows build
<phrase condition="standalone-ignore">(see <xref linkend="install-windows">)</phrase> and
<phrase condition="standalone-ignore">(see <xref linkend="install-windows"/>)</phrase> and
running a server under Cygwin is no longer recommended.
</para>
@ -2441,7 +2441,7 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch
Microsoft's <productname>Visual C++</productname> compiler suite.
The MinGW build variant uses the normal build system described in
this chapter; the Visual C++ build works completely differently
and is described in <xref linkend="install-windows">.
and is described in <xref linkend="install-windows"/>.
It is a fully native build and uses no additional software like
MinGW. A ready-made installer is available on the main
PostgreSQL web site.
@ -2602,7 +2602,7 @@ LIBOBJS = snprintf.o
<title>Using DTrace for Tracing PostgreSQL</title>
<para>
Yes, using DTrace is possible. See <xref linkend="dynamic-trace"> for
Yes, using DTrace is possible. See <xref linkend="dynamic-trace"/> for
further information.
</para>

View File

@ -29,8 +29,8 @@
<para>
The functions provided by the <filename>intarray</filename> module
are shown in <xref linkend="intarray-func-table">, the operators
in <xref linkend="intarray-op-table">.
are shown in <xref linkend="intarray-func-table"/>, the operators
in <xref linkend="intarray-op-table"/>.
</para>
<table id="intarray-func-table">

View File

@ -23,13 +23,13 @@
<itemizedlist>
<listitem>
<para>
<xref linkend="tutorial"> is an informal introduction for new users.
<xref linkend="tutorial"/> is an informal introduction for new users.
</para>
</listitem>
<listitem>
<para>
<xref linkend="sql"> documents the <acronym>SQL</acronym> query
<xref linkend="sql"/> documents the <acronym>SQL</acronym> query
language environment, including data types and functions, as well
as user-level performance tuning. Every
<productname>PostgreSQL</productname> user should read this.
@ -38,7 +38,7 @@
<listitem>
<para>
<xref linkend="admin"> describes the installation and
<xref linkend="admin"/> describes the installation and
administration of the server. Everyone who runs a
<productname>PostgreSQL</productname> server, be it for private
use or for others, should read this part.
@ -47,7 +47,7 @@
<listitem>
<para>
<xref linkend="client-interfaces"> describes the programming
<xref linkend="client-interfaces"/> describes the programming
interfaces for <productname>PostgreSQL</productname> client
programs.
</para>
@ -56,7 +56,7 @@
<listitem>
<para>
<xref linkend="server-programming"> contains information for
<xref linkend="server-programming"/> contains information for
advanced users about the extensibility capabilities of the
server. Topics include user-defined data types and
functions.
@ -65,7 +65,7 @@
<listitem>
<para>
<xref linkend="reference"> contains reference information about
<xref linkend="reference"/> contains reference information about
SQL commands, client and server programs. This part supports
the other parts with structured information sorted by command or
program.
@ -74,7 +74,7 @@
<listitem>
<para>
<xref linkend="internals"> contains assorted information that might be of
<xref linkend="internals"/> contains assorted information that might be of
use to <productname>PostgreSQL</productname> developers.
</para>
</listitem>

View File

@ -25,7 +25,7 @@
<title>Data Types</title>
<para>
<xref linkend="isn-datatypes"> shows the data types provided by
<xref linkend="isn-datatypes"/> shows the data types provided by
the <filename>isn</filename> module.
</para>
@ -222,7 +222,7 @@
<para>
The <filename>isn</filename> module provides the standard comparison operators,
plus B-tree and hash indexing support for all these data types. In
addition there are several specialized functions; shown in <xref linkend="isn-functions">.
addition there are several specialized functions; shown in <xref linkend="isn-functions"/>.
In this table,
<type>isn</type> means any one of the module's data types.
</para>

View File

@ -18,7 +18,7 @@
the JSON data types have the advantage of enforcing that each
stored value is valid according to the JSON rules. There are also
assorted JSON-specific functions and operators available for data stored
in these data types; see <xref linkend="functions-json">.
in these data types; see <xref linkend="functions-json"/>.
</para>
<para>
@ -82,7 +82,7 @@
<note>
<para>
Many of the JSON processing functions described
in <xref linkend="functions-json"> will convert Unicode escapes to
in <xref linkend="functions-json"/> will convert Unicode escapes to
regular characters, and will therefore throw the same types of errors
just described even if their input is of type <type>json</type>
not <type>jsonb</type>. The fact that the <type>json</type> input function does
@ -98,7 +98,7 @@
When converting textual JSON input into <type>jsonb</type>, the primitive
types described by <acronym>RFC</acronym> 7159 are effectively mapped onto
native <productname>PostgreSQL</productname> types, as shown
in <xref linkend="json-type-mapping-table">.
in <xref linkend="json-type-mapping-table"/>.
Therefore, there are some minor additional constraints on what
constitutes valid <type>jsonb</type> data that do not apply to
the <type>json</type> type, nor to JSON in the abstract, corresponding
@ -380,7 +380,7 @@ SELECT doc-&gt;'site_name' FROM websites
<para>
The various containment and existence operators, along with all other
JSON operators and functions are documented
in <xref linkend="functions-json">.
in <xref linkend="functions-json"/>.
</para>
</sect2>
@ -404,7 +404,7 @@ SELECT doc-&gt;'site_name' FROM websites
and <literal>?|</literal> operators and path/value-exists operator
<literal>@&gt;</literal>.
(For details of the semantics that these operators
implement, see <xref linkend="functions-jsonb-op-table">.)
implement, see <xref linkend="functions-jsonb-op-table"/>.)
An example of creating an index with this operator class is:
<programlisting>
CREATE INDEX idxgin ON api USING GIN (jdoc);
@ -465,7 +465,7 @@ CREATE INDEX idxgintags ON api USING GIN ((jdoc -&gt; 'tags'));
operator <literal>?</literal> to the indexed
expression <literal>jdoc -&gt; 'tags'</literal>.
(More information on expression indexes can be found in <xref
linkend="indexes-expressional">.)
linkend="indexes-expressional"/>.)
</para>
<para>
Another approach to querying is to exploit containment, for example:

View File

@ -9,10 +9,10 @@
</indexterm>
<para>
<xref linkend="keywords-table"> lists all tokens that are key words
<xref linkend="keywords-table"/> lists all tokens that are key words
in the SQL standard and in <productname>PostgreSQL</productname>
&version;. Background information can be found in <xref
linkend="sql-syntax-identifiers">.
linkend="sql-syntax-identifiers"/>.
(For space reasons, only the latest two versions of the SQL standard, and
SQL-92 for historical comparison, are included. The differences between
those and the other intermediate standard versions are small.)
@ -45,7 +45,7 @@
</para>
<para>
In <xref linkend="keywords-table"> in the column for
In <xref linkend="keywords-table"/> in the column for
<productname>PostgreSQL</productname> we classify as
<quote>non-reserved</quote> those key words that are explicitly
known to the parser but are allowed as column or table names.
@ -69,7 +69,7 @@
<para>
It is important to understand before studying <xref
linkend="keywords-table"> that the fact that a key word is not
linkend="keywords-table"/> that the fact that a key word is not
reserved in <productname>PostgreSQL</productname> does not mean that
the feature related to the word is not implemented. Conversely, the
presence of a key word does not indicate the existence of a feature.

View File

@ -25,15 +25,15 @@
those written for C++, Perl, Python, Tcl and <application>ECPG</application>.
So some aspects of <application>libpq</application>'s behavior will be
important to you if you use one of those packages. In particular,
<xref linkend="libpq-envars">,
<xref linkend="libpq-pgpass"> and
<xref linkend="libpq-ssl">
<xref linkend="libpq-envars"/>,
<xref linkend="libpq-pgpass"/> and
<xref linkend="libpq-ssl"/>
describe behavior that is visible to the user of any application
that uses <application>libpq</application>.
</para>
<para>
Some short programs are included at the end of this chapter (<xref linkend="libpq-example">) to show how
Some short programs are included at the end of this chapter (<xref linkend="libpq-example"/>) to show how
to write programs that use <application>libpq</application>. There are also several
complete examples of <application>libpq</application> applications in the
directory <filename>src/test/examples</filename> in the source code distribution.
@ -118,7 +118,7 @@ PGconn *PQconnectdbParams(const char * const *keywords,
<para>
The currently recognized parameter key words are listed in
<xref linkend="libpq-paramkeywords">.
<xref linkend="libpq-paramkeywords"/>.
</para>
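As a hedged illustration of the array form (not taken from the patch; the host, database, and application names are placeholders), the keyword and value arrays are parallel and NULL-terminated:

    #include <stdio.h>
    #include <libpq-fe.h>

    /* Sketch only: connect using parallel keyword/value arrays. */
    static PGconn *
    connect_with_params(void)
    {
        const char *keywords[] = { "host", "dbname", "application_name", NULL };
        const char *values[]   = { "localhost", "mydb", "example_app", NULL };
        PGconn     *conn = PQconnectdbParams(keywords, values, 0);

        if (PQstatus(conn) != CONNECTION_OK)
            fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        return conn;
    }

Passing 0 for expand_dbname keeps the dbname value from being reparsed as a connection string.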
<para>
@ -128,7 +128,7 @@ PGconn *PQconnectdbParams(const char * const *keywords,
<parameter>dbname</parameter> is expanded this way, any subsequent
<parameter>dbname</parameter> value is processed as plain database name. More
details on the possible connection string formats appear in
<xref linkend="libpq-connstring">.
<xref linkend="libpq-connstring"/>.
</para>
<para>
@ -140,7 +140,7 @@ PGconn *PQconnectdbParams(const char * const *keywords,
<para>
If any parameter is <symbol>NULL</symbol> or an empty string, the corresponding
environment variable (see <xref linkend="libpq-envars">) is checked.
environment variable (see <xref linkend="libpq-envars"/>) is checked.
If the environment variable is not set either, then the indicated
built-in defaults are used.
</para>
@ -176,7 +176,7 @@ PGconn *PQconnectdb(const char *conninfo);
The passed string can be empty to use all default parameters, or it can
contain one or more parameter settings separated by whitespace,
or it can contain a <acronym>URI</acronym>.
See <xref linkend="libpq-connstring"> for details.
See <xref linkend="libpq-connstring"/> for details.
</para>
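A minimal sketch of the string form, assuming a local server and a database named mydb (both placeholders); a keyword/value string or a URI can be passed unchanged:

    #include <stdio.h>
    #include <stdlib.h>
    #include <libpq-fe.h>

    int
    main(void)
    {
        /* A URI works the same way: "postgresql://localhost/mydb?connect_timeout=10" */
        PGconn *conn = PQconnectdb("host=localhost dbname=mydb connect_timeout=10");

        if (PQstatus(conn) != CONNECTION_OK)
        {
            fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
            PQfinish(conn);
            return EXIT_FAILURE;
        }
        printf("connected to %s\n", PQdb(conn));
        PQfinish(conn);
        return EXIT_SUCCESS;
    }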
@ -289,7 +289,7 @@ PostgresPollingStatusType PQconnectPoll(PGconn *conn);
<para>
The <literal>hostaddr</literal> and <literal>host</literal> parameters are used appropriately to ensure that
name and reverse name queries are not made. See the documentation of
these parameters in <xref linkend="libpq-paramkeywords"> for details.
these parameters in <xref linkend="libpq-paramkeywords"/> for details.
</para>
</listitem>
@ -802,7 +802,7 @@ host=localhost port=5432 dbname=mydb connect_timeout=10
<para>
The recognized parameter key words are listed in <xref
linkend="libpq-paramkeywords">.
linkend="libpq-paramkeywords"/>.
</para>
</sect3>
@ -847,7 +847,7 @@ postgresql:///mydb?host=localhost&amp;port=5433
<para>
Any connection parameters not corresponding to key words listed in <xref
linkend="libpq-paramkeywords"> are ignored and a warning message about them
linkend="libpq-paramkeywords"/> are ignored and a warning message about them
is sent to <filename>stderr</filename>.
</para>
@ -867,7 +867,7 @@ postgresql://[2001:db8::1234]/database
<para>
The host component is interpreted as described for the parameter <xref
linkend="libpq-connect-host">. In particular, a Unix-domain socket
linkend="libpq-connect-host"/>. In particular, a Unix-domain socket
connection is chosen if the host part is either empty or starts with a
slash, otherwise a TCP/IP connection is initiated. Note, however, that the
slash is a reserved character in the hierarchical part of the URI. So, to
@ -954,7 +954,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
<para>
A comma-separated list of host names is also accepted, in which case
each host name in the list is tried in order. See
<xref linkend="libpq-multiple-hosts"> for details.
<xref linkend="libpq-multiple-hosts"/> for details.
</para>
</listitem>
</varlistentry>
@ -1006,13 +1006,13 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
is not the name of the server at network address <literal>hostaddr</literal>.
Also, note that <literal>host</literal> rather than <literal>hostaddr</literal>
is used to identify the connection in a password file (see
<xref linkend="libpq-pgpass">).
<xref linkend="libpq-pgpass"/>).
</para>
<para>
A comma-separated list of <literal>hostaddrs</literal> is also accepted, in
which case each host in the list is tried in order. See
<xref linkend="libpq-multiple-hosts"> for details.
<xref linkend="libpq-multiple-hosts"/> for details.
</para>
<para>
Without either a host name or host address,
@ -1044,7 +1044,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
<para>
The database name. Defaults to be the same as the user name.
In certain contexts, the value is checked for extended
formats; see <xref linkend="libpq-connstring"> for more details on
formats; see <xref linkend="libpq-connstring"/> for more details on
those.
</para>
</listitem>
@ -1075,7 +1075,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
<listitem>
<para>
Specifies the name of the file used to store passwords
(see <xref linkend="libpq-pgpass">).
(see <xref linkend="libpq-pgpass"/>).
Defaults to <filename>~/.pgpass</filename>, or
<filename>%APPDATA%\postgresql\pgpass.conf</filename> on Microsoft Windows.
(No error is reported if this file does not exist.)
@ -1125,7 +1125,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
separate command-line arguments, unless escaped with a backslash
(<literal>\</literal>); write <literal>\\</literal> to represent a literal
backslash. For a detailed discussion of the available
options, consult <xref linkend="runtime-config">.
options, consult <xref linkend="runtime-config"/>.
</para>
</listitem>
</varlistentry>
@ -1134,7 +1134,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
<term><literal>application_name</literal></term>
<listitem>
<para>
Specifies a value for the <xref linkend="guc-application-name">
Specifies a value for the <xref linkend="guc-application-name"/>
configuration parameter.
</para>
</listitem>
@ -1145,7 +1145,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
<listitem>
<para>
Specifies a fallback value for the <xref
linkend="guc-application-name"> configuration parameter.
linkend="guc-application-name"/> configuration parameter.
This value will be used if no value has been given for
<literal>application_name</literal> via a connection parameter or the
<envar>PGAPPNAME</envar> environment variable. Specifying
@ -1295,7 +1295,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
</varlistentry>
</variablelist>
See <xref linkend="libpq-ssl"> for a detailed description of how
See <xref linkend="libpq-ssl"/> for a detailed description of how
these options work.
</para>
@ -1430,7 +1430,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
to ensure that you are connected to a server run by a trusted user.)
This option is only supported on platforms for which the
<literal>peer</literal> authentication method is implemented; see
<xref linkend="auth-peer">.
<xref linkend="auth-peer"/>.
</para>
</listitem>
</varlistentry>
@ -1442,7 +1442,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
Kerberos service name to use when authenticating with GSSAPI.
This must match the service name specified in the server
configuration for Kerberos authentication to succeed. (See also
<xref linkend="gssapi-auth">.)
<xref linkend="gssapi-auth"/>.)
</para>
</listitem>
</varlistentry>
@ -1465,7 +1465,7 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
Service name to use for additional parameters. It specifies a service
name in <filename>pg_service.conf</filename> that holds additional connection parameters.
This allows applications to specify only a service name so connection parameters
can be centrally maintained. See <xref linkend="libpq-pgservice">.
can be centrally maintained. See <xref linkend="libpq-pgservice"/>.
</para>
</listitem>
</varlistentry>
@ -2225,7 +2225,7 @@ PGresult *PQexec(PGconn *conn, const char *command);
<function>PQexec</function> call are processed in a single transaction, unless
there are explicit <command>BEGIN</command>/<command>COMMIT</command>
commands included in the query string to divide it into multiple
transactions. (See <xref linkend="protocol-flow-multi-statement">
transactions. (See <xref linkend="protocol-flow-multi-statement"/>
for more details about how the server handles multi-query strings.)
Note however that the returned
<structname>PGresult</structname> structure describes only the result
@ -2447,7 +2447,7 @@ PGresult *PQprepare(PGconn *conn,
<function>PQprepare</function> creates a prepared statement for later
execution with <function>PQexecPrepared</function>. This feature allows
commands to be executed repeatedly without being parsed and
planned each time; see <xref linkend="sql-prepare"> for details.
planned each time; see <xref linkend="sql-prepare"/> for details.
<function>PQprepare</function> is supported only in protocol 3.0 and later
connections; it will fail when using protocol 2.0.
</para>
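A short sketch of the prepare-then-execute cycle; the statement name, table, and parameter value are illustrative, and conn is assumed to be an already-open connection:

    #include <stdio.h>
    #include <libpq-fe.h>

    static void
    fetch_item(PGconn *conn)
    {
        const char *param_values[1] = { "42" };   /* parameters passed in text form */
        PGresult   *res;

        res = PQprepare(conn, "fetch_item",
                        "SELECT name FROM items WHERE id = $1", 1, NULL);
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
            fprintf(stderr, "PQprepare failed: %s", PQerrorMessage(conn));
        PQclear(res);

        res = PQexecPrepared(conn, "fetch_item", 1, param_values, NULL, NULL, 0);
        if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) > 0)
            printf("name = %s\n", PQgetvalue(res, 0, 0));
        PQclear(res);
    }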
@ -2489,10 +2489,10 @@ PGresult *PQprepare(PGconn *conn,
</variablelist>
Prepared statements for use with <function>PQexecPrepared</function> can also
be created by executing SQL <xref linkend="sql-prepare">
be created by executing SQL <xref linkend="sql-prepare"/>
statements. Also, although there is no <application>libpq</application>
function for deleting a prepared statement, the SQL <xref
linkend="sql-deallocate"> statement
linkend="sql-deallocate"/> statement
can be used for that purpose.
</para>
@ -2746,7 +2746,7 @@ ExecStatusType PQresultStatus(const PGresult *res);
The <structname>PGresult</structname> contains a single result tuple
from the current command. This status occurs only when
single-row mode has been selected for the query
(see <xref linkend="libpq-single-row-mode">).
(see <xref linkend="libpq-single-row-mode"/>).
</para>
</listitem>
</varlistentry>
@ -2770,7 +2770,7 @@ ExecStatusType PQresultStatus(const PGresult *res);
never be returned directly by <function>PQexec</function> or other
query execution functions; results of this kind are instead passed
to the notice processor (see <xref
linkend="libpq-notice-processing">).
linkend="libpq-notice-processing"/>).
</para>
</listitem>
</varlistentry>
@ -2941,7 +2941,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode);
front-end applications to perform specific operations (such
as error handling) in response to a particular database error.
For a list of the possible SQLSTATE codes, see <xref
linkend="errcodes-appendix">. This field is not localizable,
linkend="errcodes-appendix"/>. This field is not localizable,
and is always present.
</para>
</listitem>
@ -3118,7 +3118,7 @@ char *PQresultErrorField(const PGresult *res, int fieldcode);
<para>
The fields for schema name, table name, column name, data type name,
and constraint name are supplied only for a limited number of error
types; see <xref linkend="errcodes-appendix">. Do not assume that
types; see <xref linkend="errcodes-appendix"/>. Do not assume that
the presence of any of these fields guarantees the presence of
another field. Core error sources observe the interrelationships
noted above, but user-defined functions may use these fields in other
@ -4075,7 +4075,7 @@ unsigned char *PQescapeByteaConn(PGconn *conn,
<type>bytea</type> literal in an <acronym>SQL</acronym> statement.
<function>PQescapeByteaConn</function> escapes bytes using
either hex encoding or backslash escaping. See <xref
linkend="datatype-binary"> for more information.
linkend="datatype-binary"/> for more information.
</para>
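A sketch of the call pattern, with made-up input bytes; the escaped result is allocated by libpq and must be released with PQfreemem:

    #include <stdio.h>
    #include <libpq-fe.h>

    static void
    escape_blob(PGconn *conn)
    {
        const unsigned char raw[] = { 0x00, 0x27, 0xff };   /* arbitrary sample bytes */
        size_t         escaped_len = 0;
        unsigned char *escaped = PQescapeByteaConn(conn, raw, sizeof(raw), &escaped_len);

        if (escaped == NULL)
        {
            fprintf(stderr, "escaping failed: %s", PQerrorMessage(conn));
            return;
        }
        printf("escaped form (%zu bytes): %s\n", escaped_len, escaped);
        PQfreemem(escaped);
    }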
<para>
@ -4508,7 +4508,7 @@ PGresult *PQgetResult(PGconn *conn);
Another frequently-desired feature that can be obtained with
<function>PQsendQuery</function> and <function>PQgetResult</function>
is retrieving large query results a row at a time. This is discussed
in <xref linkend="libpq-single-row-mode">.
in <xref linkend="libpq-single-row-mode"/>.
</para>
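A sketch of that row-at-a-time pattern (query and table are placeholders; conn is assumed open). PQsetSingleRowMode must be called right after PQsendQuery, before the first PQgetResult:

    #include <stdio.h>
    #include <libpq-fe.h>

    static void
    stream_rows(PGconn *conn)
    {
        PGresult *res;

        if (!PQsendQuery(conn, "SELECT id, payload FROM big_table"))
        {
            fprintf(stderr, "send failed: %s", PQerrorMessage(conn));
            return;
        }
        if (!PQsetSingleRowMode(conn))
            fprintf(stderr, "could not switch to single-row mode\n");

        while ((res = PQgetResult(conn)) != NULL)
        {
            if (PQresultStatus(res) == PGRES_SINGLE_TUPLE)
                printf("id = %s\n", PQgetvalue(res, 0, 0));
            /* a final zero-row PGRES_TUPLES_OK result ends the stream */
            PQclear(res);
        }
    }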
<para>
@ -4600,14 +4600,14 @@ int PQisBusy(PGconn *conn);
<function>PQgetResult</function> if <function>PQisBusy</function>
returns false (0). It can also call <function>PQnotifies</function>
to detect <command>NOTIFY</command> messages (see <xref
linkend="libpq-notify">).
linkend="libpq-notify"/>).
</para>
<para>
A client that uses
<function>PQsendQuery</function>/<function>PQgetResult</function>
can also attempt to cancel a command that is still being processed
by the server; see <xref linkend="libpq-cancel">. But regardless of
by the server; see <xref linkend="libpq-cancel"/>. But regardless of
the return value of <function>PQcancel</function>, the application
must continue with the normal result-reading sequence using
<function>PQgetResult</function>. A successful cancellation will
@ -4753,7 +4753,7 @@ int PQflush(PGconn *conn);
(or a sibling function). This mode selection is effective only for the
currently executing query. Then call <function>PQgetResult</function>
repeatedly, until it returns null, as documented in <xref
linkend="libpq-async">. If the query returns any rows, they are returned
linkend="libpq-async"/>. If the query returns any rows, they are returned
as individual <structname>PGresult</structname> objects, which look like
normal query results except for having status code
<literal>PGRES_SINGLE_TUPLE</literal> instead of
@ -5119,7 +5119,7 @@ typedef struct pgNotify
</para>
<para>
<xref linkend="libpq-example-2"> gives a sample program that illustrates
<xref linkend="libpq-example-2"/> gives a sample program that illustrates
the use of asynchronous notification.
</para>
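A sketch of consuming notifications on an open connection (the channel is assumed to have been registered earlier with LISTEN; all names are illustrative):

    #include <stdio.h>
    #include <libpq-fe.h>

    static void
    drain_notifications(PGconn *conn)
    {
        PGnotify *note;

        if (!PQconsumeInput(conn))      /* pull any pending data off the socket */
        {
            fprintf(stderr, "input error: %s", PQerrorMessage(conn));
            return;
        }
        while ((note = PQnotifies(conn)) != NULL)
        {
            printf("NOTIFY \"%s\" (payload \"%s\") from backend PID %d\n",
                   note->relname, note->extra, note->be_pid);
            PQfreemem(note);
        }
    }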
@ -5242,7 +5242,7 @@ typedef struct pgNotify
0 indicates the overall copy format is textual (rows separated by
newlines, columns separated by separator characters, etc). 1
indicates the overall copy format is binary. See <xref
linkend="sql-copy"> for more information.
linkend="sql-copy"/> for more information.
</para>
</listitem>
</varlistentry>
@ -5322,7 +5322,7 @@ int PQputCopyData(PGconn *conn,
into buffer loads of any convenient size. Buffer-load boundaries
have no semantic significance when sending. The contents of the
data stream must match the data format expected by the
<command>COPY</command> command; see <xref linkend="sql-copy"> for details.
<command>COPY</command> command; see <xref linkend="sql-copy"/> for details.
</para>
</listitem>
</varlistentry>
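A sketch of the client side of a text-format COPY FROM STDIN (table, columns, and row contents are placeholders; conn is assumed open):

    #include <stdio.h>
    #include <string.h>
    #include <libpq-fe.h>

    static void
    copy_rows(PGconn *conn)
    {
        const char *rows = "1\tone\n2\ttwo\n";
        PGresult   *res = PQexec(conn, "COPY items (id, name) FROM STDIN");

        if (PQresultStatus(res) != PGRES_COPY_IN)
        {
            fprintf(stderr, "COPY did not start: %s", PQerrorMessage(conn));
            PQclear(res);
            return;
        }
        PQclear(res);

        if (PQputCopyData(conn, rows, (int) strlen(rows)) != 1 ||
            PQputCopyEnd(conn, NULL) != 1)
            fprintf(stderr, "sending COPY data failed: %s", PQerrorMessage(conn));

        while ((res = PQgetResult(conn)) != NULL)   /* collect the final command status */
            PQclear(res);
    }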
@ -5982,7 +5982,7 @@ char *PQencryptPasswordConn(PGconn *conn, const char *passwd, const char *user,
version 10, and will not work correctly with older server versions. If
<parameter>algorithm</parameter> is <symbol>NULL</symbol>, this function will query
the server for the current value of the
<xref linkend="guc-password-encryption"> setting. That can block, and
<xref linkend="guc-password-encryption"/> setting. That can block, and
will fail if the current transaction is aborted, or if the connection
is busy executing another query. If you wish to use the default
algorithm for the server but want to avoid blocking, query
@ -6072,7 +6072,7 @@ PGresult *PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status);
<listitem>
<para>
Fires a <literal>PGEVT_RESULTCREATE</literal> event (see <xref
linkend="libpq-events">) for each event procedure registered in the
linkend="libpq-events"/>) for each event procedure registered in the
<structname>PGresult</structname> object. Returns non-zero for success,
zero if any event procedure fails.
@ -7004,7 +7004,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGHOST</envar></primary>
</indexterm>
<envar>PGHOST</envar> behaves the same as the <xref
linkend="libpq-connect-host"> connection parameter.
linkend="libpq-connect-host"/> connection parameter.
</para>
</listitem>
@ -7014,7 +7014,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGHOSTADDR</envar></primary>
</indexterm>
<envar>PGHOSTADDR</envar> behaves the same as the <xref
linkend="libpq-connect-hostaddr"> connection parameter.
linkend="libpq-connect-hostaddr"/> connection parameter.
This can be set instead of or in addition to <envar>PGHOST</envar>
to avoid DNS lookup overhead.
</para>
@ -7026,7 +7026,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGPORT</envar></primary>
</indexterm>
<envar>PGPORT</envar> behaves the same as the <xref
linkend="libpq-connect-port"> connection parameter.
linkend="libpq-connect-port"/> connection parameter.
</para>
</listitem>
@ -7036,7 +7036,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGDATABASE</envar></primary>
</indexterm>
<envar>PGDATABASE</envar> behaves the same as the <xref
linkend="libpq-connect-dbname"> connection parameter.
linkend="libpq-connect-dbname"/> connection parameter.
</para>
</listitem>
@ -7046,7 +7046,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGUSER</envar></primary>
</indexterm>
<envar>PGUSER</envar> behaves the same as the <xref
linkend="libpq-connect-user"> connection parameter.
linkend="libpq-connect-user"/> connection parameter.
</para>
</listitem>
@ -7056,12 +7056,12 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGPASSWORD</envar></primary>
</indexterm>
<envar>PGPASSWORD</envar> behaves the same as the <xref
linkend="libpq-connect-password"> connection parameter.
linkend="libpq-connect-password"/> connection parameter.
Use of this environment variable
is not recommended for security reasons, as some operating systems
allow non-root users to see process environment variables via
<application>ps</application>; instead consider using a password file
(see <xref linkend="libpq-pgpass">).
(see <xref linkend="libpq-pgpass"/>).
</para>
</listitem>
@ -7071,7 +7071,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGPASSFILE</envar></primary>
</indexterm>
<envar>PGPASSFILE</envar> behaves the same as the <xref
linkend="libpq-connect-passfile"> connection parameter.
linkend="libpq-connect-passfile"/> connection parameter.
</para>
</listitem>
@ -7081,7 +7081,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGSERVICE</envar></primary>
</indexterm>
<envar>PGSERVICE</envar> behaves the same as the <xref
linkend="libpq-connect-service"> connection parameter.
linkend="libpq-connect-service"/> connection parameter.
</para>
</listitem>
@ -7093,7 +7093,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<envar>PGSERVICEFILE</envar> specifies the name of the per-user
connection service file. If not set, it defaults
to <filename>~/.pg_service.conf</filename>
(see <xref linkend="libpq-pgservice">).
(see <xref linkend="libpq-pgservice"/>).
</para>
</listitem>
@ -7103,7 +7103,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGOPTIONS</envar></primary>
</indexterm>
<envar>PGOPTIONS</envar> behaves the same as the <xref
linkend="libpq-connect-options"> connection parameter.
linkend="libpq-connect-options"/> connection parameter.
</para>
</listitem>
@ -7113,7 +7113,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGAPPNAME</envar></primary>
</indexterm>
<envar>PGAPPNAME</envar> behaves the same as the <xref
linkend="libpq-connect-application-name"> connection parameter.
linkend="libpq-connect-application-name"/> connection parameter.
</para>
</listitem>
@ -7123,7 +7123,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGSSLMODE</envar></primary>
</indexterm>
<envar>PGSSLMODE</envar> behaves the same as the <xref
linkend="libpq-connect-sslmode"> connection parameter.
linkend="libpq-connect-sslmode"/> connection parameter.
</para>
</listitem>
@ -7133,7 +7133,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGREQUIRESSL</envar></primary>
</indexterm>
<envar>PGREQUIRESSL</envar> behaves the same as the <xref
linkend="libpq-connect-requiressl"> connection parameter.
linkend="libpq-connect-requiressl"/> connection parameter.
This environment variable is deprecated in favor of the
<envar>PGSSLMODE</envar> variable; setting both variables suppresses the
effect of this one.
@ -7146,7 +7146,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGSSLCOMPRESSION</envar></primary>
</indexterm>
<envar>PGSSLCOMPRESSION</envar> behaves the same as the <xref
linkend="libpq-connect-sslcompression"> connection parameter.
linkend="libpq-connect-sslcompression"/> connection parameter.
</para>
</listitem>
@ -7156,7 +7156,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGSSLCERT</envar></primary>
</indexterm>
<envar>PGSSLCERT</envar> behaves the same as the <xref
linkend="libpq-connect-sslcert"> connection parameter.
linkend="libpq-connect-sslcert"/> connection parameter.
</para>
</listitem>
@ -7166,7 +7166,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGSSLKEY</envar></primary>
</indexterm>
<envar>PGSSLKEY</envar> behaves the same as the <xref
linkend="libpq-connect-sslkey"> connection parameter.
linkend="libpq-connect-sslkey"/> connection parameter.
</para>
</listitem>
@ -7176,7 +7176,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGSSLROOTCERT</envar></primary>
</indexterm>
<envar>PGSSLROOTCERT</envar> behaves the same as the <xref
linkend="libpq-connect-sslrootcert"> connection parameter.
linkend="libpq-connect-sslrootcert"/> connection parameter.
</para>
</listitem>
@ -7186,7 +7186,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGSSLCRL</envar></primary>
</indexterm>
<envar>PGSSLCRL</envar> behaves the same as the <xref
linkend="libpq-connect-sslcrl"> connection parameter.
linkend="libpq-connect-sslcrl"/> connection parameter.
</para>
</listitem>
@ -7196,7 +7196,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGREQUIREPEER</envar></primary>
</indexterm>
<envar>PGREQUIREPEER</envar> behaves the same as the <xref
linkend="libpq-connect-requirepeer"> connection parameter.
linkend="libpq-connect-requirepeer"/> connection parameter.
</para>
</listitem>
@ -7206,7 +7206,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGKRBSRVNAME</envar></primary>
</indexterm>
<envar>PGKRBSRVNAME</envar> behaves the same as the <xref
linkend="libpq-connect-krbsrvname"> connection parameter.
linkend="libpq-connect-krbsrvname"/> connection parameter.
</para>
</listitem>
@ -7216,7 +7216,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGGSSLIB</envar></primary>
</indexterm>
<envar>PGGSSLIB</envar> behaves the same as the <xref
linkend="libpq-connect-gsslib"> connection parameter.
linkend="libpq-connect-gsslib"/> connection parameter.
</para>
</listitem>
@ -7226,7 +7226,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGCONNECT_TIMEOUT</envar></primary>
</indexterm>
<envar>PGCONNECT_TIMEOUT</envar> behaves the same as the <xref
linkend="libpq-connect-connect-timeout"> connection parameter.
linkend="libpq-connect-connect-timeout"/> connection parameter.
</para>
</listitem>
@ -7236,7 +7236,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGCLIENTENCODING</envar></primary>
</indexterm>
<envar>PGCLIENTENCODING</envar> behaves the same as the <xref
linkend="libpq-connect-client-encoding"> connection parameter.
linkend="libpq-connect-client-encoding"/> connection parameter.
</para>
</listitem>
@ -7246,7 +7246,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<primary><envar>PGTARGETSESSIONATTRS</envar></primary>
</indexterm>
<envar>PGTARGETSESSIONATTRS</envar> behaves the same as the <xref
linkend="libpq-connect-target-session-attrs"> connection parameter.
linkend="libpq-connect-target-session-attrs"/> connection parameter.
</para>
</listitem>
</itemizedlist>
@ -7255,8 +7255,8 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<para>
The following environment variables can be used to specify default
behavior for each <productname>PostgreSQL</productname> session. (See
also the <xref linkend="sql-alterrole">
and <xref linkend="sql-alterdatabase">
also the <xref linkend="sql-alterrole"/>
and <xref linkend="sql-alterdatabase"/>
commands for ways to set default behavior on a per-user or per-database
basis.)
@ -7293,7 +7293,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
</listitem>
</itemizedlist>
Refer to the <acronym>SQL</acronym> command <xref linkend="sql-set">
Refer to the <acronym>SQL</acronym> command <xref linkend="sql-set"/>
for information on correct values for these
environment variables.
</para>
@ -7348,7 +7348,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<filename>%APPDATA%</filename> refers to the Application Data subdirectory in
the user's profile).
Alternatively, a password file can be specified
using the connection parameter <xref linkend="libpq-connect-passfile">
using the connection parameter <xref linkend="libpq-connect-passfile"/>
or the environment variable <envar>PGPASSFILE</envar>.
</para>
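As a reminder of the file's layout (all values below are placeholders), each line holds five colon-separated fields, a * entry matches anything, and on Unix the file must be readable only by its owner (chmod 0600 ~/.pgpass):

    # hostname:port:database:username:password
    db.example.com:5432:mydb:alice:secret
    *:*:*:alice:fallback-password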
@ -7422,7 +7422,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough)
<para>
The file uses an <quote>INI file</quote> format where the section
name is the service name and the parameters are connection
parameters; see <xref linkend="libpq-paramkeywords"> for a list. For
parameters; see <xref linkend="libpq-paramkeywords"/> for a list. For
example:
<programlisting>
# comment
@ -7456,7 +7456,7 @@ user=admin
<para>
LDAP connection parameter lookup uses the connection service file
<filename>pg_service.conf</filename> (see <xref
linkend="libpq-pgservice">). A line in a
linkend="libpq-pgservice"/>). A line in a
<filename>pg_service.conf</filename> stanza that starts with
<literal>ldap://</literal> will be recognized as an LDAP URL and an
LDAP query will be performed. The result must be a list of
@ -7528,7 +7528,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*)
<para>
<productname>PostgreSQL</productname> has native support for using <acronym>SSL</acronym>
connections to encrypt client/server communications for increased
security. See <xref linkend="ssl-tcp"> for details about the server-side
security. See <xref linkend="ssl-tcp"/> for details about the server-side
<acronym>SSL</acronym> functionality.
</para>
@ -7643,7 +7643,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*)
file, then its parent authority's certificate, and so on up to a certificate
authority, <quote>root</quote> or <quote>intermediate</quote>, that is trusted by
the server, i.e. signed by a certificate in the server's root CA file
(<xref linkend="guc-ssl-ca-file">).
(<xref linkend="guc-ssl-ca-file"/>).
</para>
<para>
@ -7728,7 +7728,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*)
<para>
All <acronym>SSL</acronym> options carry overhead in the form of encryption and
key-exchange, so there is a trade-off that has to be made between performance
and security. <xref linkend="libpq-ssl-sslmode-statements">
and security. <xref linkend="libpq-ssl-sslmode-statements"/>
illustrates the risks the different <literal>sslmode</literal> values
protect against, and what statement they make about security and overhead.
</para>
@ -7828,7 +7828,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*)
<title>SSL Client File Usage</title>
<para>
<xref linkend="libpq-ssl-file-usage"> summarizes the files that are
<xref linkend="libpq-ssl-file-usage"/> summarizes the files that are
relevant to the SSL setup on the client.
</para>
@ -8027,7 +8027,7 @@ int PQisthreadsafe();
<structname>PGresult</structname> objects are normally read-only after creation,
and so can be passed around freely between threads. However, if you use
any of the <structname>PGresult</structname>-modifying functions described in
<xref linkend="libpq-misc"> or <xref linkend="libpq-events">, it's up
<xref linkend="libpq-misc"/> or <xref linkend="libpq-events"/>, it's up
to you to avoid concurrent operations on the same <structname>PGresult</structname>,
too.
</para>

View File

@ -102,7 +102,7 @@ CREATE TRIGGER t_raster BEFORE UPDATE OR DELETE ON image
<para>
If you already have, or suspect you have, orphaned large objects, see the
<xref linkend="vacuumlo"> module to help
<xref linkend="vacuumlo"/> module to help
you clean them up. It's a good idea to run <application>vacuumlo</application>
occasionally as a back-stop to the <function>lo_manage</function> trigger.
</para>

View File

@ -83,8 +83,8 @@
<para>
As of <productname>PostgreSQL</productname> 9.0, large objects have an owner
and a set of access permissions, which can be managed using
<xref linkend="sql-grant"> and
<xref linkend="sql-revoke">.
<xref linkend="sql-grant"/> and
<xref linkend="sql-revoke"/>.
<literal>SELECT</literal> privileges are required to read a large
object, and
<literal>UPDATE</literal> privileges are required to write or
@ -92,7 +92,7 @@
Only the large object's owner (or a database superuser) can delete,
comment on, or change the owner of a large object.
To adjust this behavior for compatibility with prior releases, see the
<xref linkend="guc-lo-compat-privileges"> run-time parameter.
<xref linkend="guc-lo-compat-privileges"/> run-time parameter.
</para>
</sect1>
@ -301,7 +301,7 @@ int lo_open(PGconn *conn, Oid lobjId, int mode);
checks were instead performed at the first actual read or write call
using the descriptor.)
These privilege checks can be disabled with the
<xref linkend="guc-lo-compat-privileges"> run-time parameter.
<xref linkend="guc-lo-compat-privileges"/> run-time parameter.
</para>
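A sketch of the descriptor-based client interface (the payload is made up; conn is assumed open, and every large-object call must run inside a transaction block):

    #include <stdio.h>
    #include <libpq-fe.h>
    #include "libpq/libpq-fs.h"             /* INV_READ, INV_WRITE */

    static void
    write_large_object(PGconn *conn)
    {
        const char data[] = "hello, large object";

        PQclear(PQexec(conn, "BEGIN"));

        Oid lobj = lo_creat(conn, INV_READ | INV_WRITE);
        int fd   = lo_open(conn, lobj, INV_WRITE);

        if (fd >= 0 && lo_write(conn, fd, data, sizeof(data) - 1) >= 0)
            printf("wrote large object with OID %u\n", lobj);
        else
            fprintf(stderr, "large object write failed: %s", PQerrorMessage(conn));

        if (fd >= 0)
            lo_close(conn, fd);
        PQclear(PQexec(conn, "COMMIT"));
    }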
<para>
@ -539,7 +539,7 @@ int lo_unlink(PGconn *conn, Oid lobjId);
<para>
Server-side functions tailored for manipulating large objects from SQL are
listed in <xref linkend="lo-funcs-table">.
listed in <xref linkend="lo-funcs-table"/>.
</para>
<table id="lo-funcs-table">
@ -656,7 +656,7 @@ SELECT lo_export(image.raster, '/tmp/motd') FROM image
<caution>
<para>
It is possible to <xref linkend="sql-grant"> use of the
It is possible to <xref linkend="sql-grant"/> use of the
server-side <function>lo_import</function>
and <function>lo_export</function> functions to non-superusers, but
careful consideration of the security implications is required. A
@ -688,7 +688,7 @@ SELECT lo_export(image.raster, '/tmp/motd') FROM image
<title>Example Program</title>
<para>
<xref linkend="lo-example"> is a sample program which shows how the large object
<xref linkend="lo-example"/> is a sample program which shows how the large object
interface
in <application>libpq</application> can be used. Parts of the program are
commented out but are left in the source for the reader's

View File

@ -8,7 +8,7 @@
changes, based upon their replication identity (usually a primary key). We
use the term logical in contrast to physical replication, which uses exact
block addresses and byte-by-byte replication. PostgreSQL supports both
mechanisms concurrently, see <xref linkend="high-availability">. Logical
mechanisms concurrently, see <xref linkend="high-availability"/>. Logical
replication allows fine-grained control over both data replication and
security.
</para>
@ -126,7 +126,7 @@
fallback if no other solution is possible. If a replica identity other
than <quote>full</quote> is set on the publisher side, a replica identity
comprising the same or fewer columns must also be set on the subscriber
side. See <xref linkend="sql-createtable-replica-identity"> for details on
side. See <xref linkend="sql-createtable-replica-identity"/> for details on
how to set the replica identity. If a table without a replica identity is
added to a publication that replicates <command>UPDATE</command>
or <command>DELETE</command> operations then
@ -140,13 +140,13 @@
</para>
<para>
A publication is created using the <xref linkend="sql-createpublication">
A publication is created using the <xref linkend="sql-createpublication"/>
command and may later be altered or dropped using corresponding commands.
</para>
<para>
The individual tables can be added and removed dynamically using
<xref linkend="sql-alterpublication">. Both the <literal>ADD
<xref linkend="sql-alterpublication"/>. Both the <literal>ADD
TABLE</literal> and <literal>DROP TABLE</literal> operations are
transactional; so the table will start or stop replicating at the correct
snapshot once the transaction has committed.
@ -179,14 +179,14 @@
<para>
Each subscription will receive changes via one replication slot (see
<xref linkend="streaming-replication-slots">). Additional temporary
<xref linkend="streaming-replication-slots"/>). Additional temporary
replication slots may be required for the initial data synchronization
of pre-existing table data.
</para>
<para>
A logical replication subscription can be a standby for synchronous
replication (see <xref linkend="synchronous-replication">). The standby
replication (see <xref linkend="synchronous-replication"/>). The standby
name is by default the subscription name. An alternative name can be
specified as <literal>application_name</literal> in the connection
information of the subscription.
@ -200,10 +200,10 @@
</para>
<para>
The subscription is added using <xref linkend="sql-createsubscription"> and
The subscription is added using <xref linkend="sql-createsubscription"/> and
can be stopped/resumed at any time using the
<xref linkend="sql-altersubscription"> command and removed using
<xref linkend="sql-dropsubscription">.
<xref linkend="sql-altersubscription"/> command and removed using
<xref linkend="sql-dropsubscription"/>.
</para>
<para>
@ -375,7 +375,7 @@
<listitem>
<para>
Large objects (see <xref linkend="largeobjects">) are not replicated.
Large objects (see <xref linkend="largeobjects"/>) are not replicated.
There is no workaround for that, other than storing data in normal
tables.
</para>
@ -409,13 +409,13 @@
<para>
Logical replication is built with an architecture similar to physical
streaming replication (see <xref linkend="streaming-replication">). It is
streaming replication (see <xref linkend="streaming-replication"/>). It is
implemented by <quote>walsender</quote> and <quote>apply</quote>
processes. The walsender process starts logical decoding (described
in <xref linkend="logicaldecoding">) of the WAL and loads the standard
in <xref linkend="logicaldecoding"/>) of the WAL and loads the standard
logical decoding plugin (pgoutput). The plugin transforms the changes read
from WAL to the logical replication protocol
(see <xref linkend="protocol-logical-replication">) and filters the data
(see <xref linkend="protocol-logical-replication"/>) and filters the data
according to the publication specification. The data is then continuously
transferred using the streaming replication protocol to the apply worker,
which maps the data to local tables and applies the individual changes as
@ -461,7 +461,7 @@
<link linkend="streaming-replication">physical streaming replication</link>,
the monitoring on a publication node is similar to monitoring of a
physical replication master
(see <xref linkend="streaming-replication-monitoring">).
(see <xref linkend="streaming-replication-monitoring"/>).
</para>
<para>

View File

@ -24,17 +24,17 @@
by <command>INSERT</command> and the new row version created
by <command>UPDATE</command>. Availability of old row versions for
<command>UPDATE</command> and <command>DELETE</command> depends on
the configured replica identity (see <xref linkend="sql-createtable-replica-identity">).
the configured replica identity (see <xref linkend="sql-createtable-replica-identity"/>).
</para>
<para>
Changes can be consumed either using the streaming replication protocol
(see <xref linkend="protocol-replication"> and
<xref linkend="logicaldecoding-walsender">), or by calling functions
via SQL (see <xref linkend="logicaldecoding-sql">). It is also possible
(see <xref linkend="protocol-replication"/> and
<xref linkend="logicaldecoding-walsender"/>), or by calling functions
via SQL (see <xref linkend="logicaldecoding-sql"/>). It is also possible
to write additional methods of consuming the output of a replication slot
without modifying core code
(see <xref linkend="logicaldecoding-writer">).
(see <xref linkend="logicaldecoding-writer"/>).
</para>
<sect1 id="logicaldecoding-example">
@ -47,8 +47,8 @@
<para>
Before you can use logical decoding, you must set
<xref linkend="guc-wal-level"> to <literal>logical</literal> and
<xref linkend="guc-max-replication-slots"> to at least 1. Then, you
<xref linkend="guc-wal-level"/> to <literal>logical</literal> and
<xref linkend="guc-max-replication-slots"/> to at least 1. Then, you
should connect to the target database (in the example
below, <literal>postgres</literal>) as a superuser.
</para>
@ -146,10 +146,10 @@ postgres=# SELECT pg_drop_replication_slot('regression_slot');
<para>
The following example shows how logical decoding is controlled over the
streaming replication protocol, using the
program <xref linkend="app-pgrecvlogical"> included in the PostgreSQL
program <xref linkend="app-pgrecvlogical"/> included in the PostgreSQL
distribution. This requires that client authentication is set up to allow
replication connections
(see <xref linkend="streaming-replication-authentication">) and
(see <xref linkend="streaming-replication-authentication"/>) and
that <varname>max_wal_senders</varname> is set sufficiently high to allow
an additional connection.
</para>
@ -208,7 +208,7 @@ $ pg_recvlogical -d postgres --slot test --drop-slot
<note>
<para><productname>PostgreSQL</productname> also has streaming replication slots
(see <xref linkend="streaming-replication">), but they are used somewhat
(see <xref linkend="streaming-replication"/>), but they are used somewhat
differently there.
</para>
</note>
@ -272,9 +272,9 @@ $ pg_recvlogical -d postgres --slot test --drop-slot
<title>Exported Snapshots</title>
<para>
When a new replication slot is created using the streaming replication
interface (see <xref linkend="protocol-replication-create-slot">), a
interface (see <xref linkend="protocol-replication-create-slot"/>), a
snapshot is exported
(see <xref linkend="functions-snapshot-synchronization">), which will show
(see <xref linkend="functions-snapshot-synchronization"/>), which will show
exactly the state of the database after which all changes will be
included in the change stream. This can be used to create a new replica by
using <link linkend="sql-set-transaction"><literal>SET TRANSACTION
@ -313,11 +313,11 @@ $ pg_recvlogical -d postgres --slot test --drop-slot
are used to create, drop, and stream changes from a replication
slot, respectively. These commands are only available over a replication
connection; they cannot be used via SQL.
See <xref linkend="protocol-replication"> for details on these commands.
See <xref linkend="protocol-replication"/> for details on these commands.
</para>
<para>
The command <xref linkend="app-pgrecvlogical"> can be used to control
The command <xref linkend="app-pgrecvlogical"/> can be used to control
logical decoding over a streaming replication connection. (It uses
these commands internally.)
</para>
@ -327,12 +327,12 @@ $ pg_recvlogical -d postgres --slot test --drop-slot
<title>Logical Decoding <acronym>SQL</acronym> Interface</title>
<para>
See <xref linkend="functions-replication"> for detailed documentation on
See <xref linkend="functions-replication"/> for detailed documentation on
the SQL-level API for interacting with logical decoding.
</para>
<para>
Synchronous replication (see <xref linkend="synchronous-replication">) is
Synchronous replication (see <xref linkend="synchronous-replication"/>) is
only supported on replication slots used over the streaming replication interface. The
function interface and additional, non-core interfaces do not support
synchronous replication.
@ -489,7 +489,7 @@ typedef struct OutputPluginOptions
<literal>output_type</literal> has to either be set to
<literal>OUTPUT_PLUGIN_TEXTUAL_OUTPUT</literal>
or <literal>OUTPUT_PLUGIN_BINARY_OUTPUT</literal>. See also
<xref linkend="logicaldecoding-output-mode">.
<xref linkend="logicaldecoding-output-mode"/>.
</para>
<para>
@ -576,8 +576,8 @@ typedef void (*LogicalDecodeChangeCB) (struct LogicalDecodingContext *ctx,
<note>
<para>
Only changes in user defined tables that are not unlogged
(see <xref linkend="sql-createtable-unlogged">) and not temporary
(see <xref linkend="sql-createtable-temporary">) can be extracted using
(see <xref linkend="sql-createtable-unlogged"/>) and not temporary
(see <xref linkend="sql-createtable-temporary"/>) can be extracted using
logical decoding.
</para>
</note>
@ -685,7 +685,7 @@ OutputPluginWrite(ctx, true);
<filename>src/backend/replication/logical/logicalfuncs.c</filename>.
Essentially, three functions need to be provided: one to read WAL, one to
prepare writing output, and one to write the output
(see <xref linkend="logicaldecoding-output-plugin-output">).
(see <xref linkend="logicaldecoding-output-plugin-output"/>).
</para>
</sect1>
@ -698,9 +698,9 @@ OutputPluginWrite(ctx, true);
replication</link> solutions with the same user interface as synchronous
replication for <link linkend="streaming-replication">streaming
replication</link>. To do this, the streaming replication interface
(see <xref linkend="logicaldecoding-walsender">) must be used to stream out
(see <xref linkend="logicaldecoding-walsender"/>) must be used to stream out
data. Clients have to send <literal>Standby status update (F)</literal>
(see <xref linkend="protocol-replication">) messages, just like streaming
(see <xref linkend="protocol-replication"/>) messages, just like streaming
replication clients do.
</para>

View File

@ -183,7 +183,7 @@ Europe &amp; Russia*@ &amp; !Transportation
<literal>&lt;</literal>, <literal>&gt;</literal>, <literal>&lt;=</literal>, <literal>&gt;=</literal>.
Comparison sorts in the order of a tree traversal, with the children
of a node sorted by label text. In addition, the specialized
operators shown in <xref linkend="ltree-op-table"> are available.
operators shown in <xref linkend="ltree-op-table"/> are available.
</para>
<table id="ltree-op-table">
@ -362,7 +362,7 @@ Europe &amp; Russia*@ &amp; !Transportation
</para>
<para>
The available functions are shown in <xref linkend="ltree-func-table">.
The available functions are shown in <xref linkend="ltree-func-table"/>.
</para>
<table id="ltree-func-table">
@ -672,7 +672,7 @@ ltreetest=&gt; SELECT ins_label(path,2,'Space') FROM test WHERE path &lt;@ 'Top.
the <type>ltree</type> type for PL/Python. The extensions are
called <literal>ltree_plpythonu</literal>, <literal>ltree_plpython2u</literal>,
and <literal>ltree_plpython3u</literal>
(see <xref linkend="plpython-python23"> for the PL/Python naming
(see <xref linkend="plpython-python23"/> for the PL/Python naming
convention). If you install these transforms and specify them when
creating a function, <type>ltree</type> values are mapped to Python lists.
(The reverse is currently not supported, however.)

View File

@ -28,20 +28,20 @@
after a catastrophe (disk failure, fire, mistakenly dropping a critical
table, etc.). The backup and recovery mechanisms available in
<productname>PostgreSQL</productname> are discussed at length in
<xref linkend="backup">.
<xref linkend="backup"/>.
</para>
<para>
The other main category of maintenance task is periodic <quote>vacuuming</quote>
of the database. This activity is discussed in
<xref linkend="routine-vacuuming">. Closely related to this is updating
<xref linkend="routine-vacuuming"/>. Closely related to this is updating
the statistics that will be used by the query planner, as discussed in
<xref linkend="vacuum-for-statistics">.
<xref linkend="vacuum-for-statistics"/>.
</para>
<para>
Another task that might need periodic attention is log file management.
This is discussed in <xref linkend="logfile-maintenance">.
This is discussed in <xref linkend="logfile-maintenance"/>.
</para>
<para>
@ -70,7 +70,7 @@
<productname>PostgreSQL</productname> databases require periodic
maintenance known as <firstterm>vacuuming</firstterm>. For many installations, it
is sufficient to let vacuuming be performed by the <firstterm>autovacuum
daemon</firstterm>, which is described in <xref linkend="autovacuum">. You might
daemon</firstterm>, which is described in <xref linkend="autovacuum"/>. You might
need to adjust the autovacuuming parameters described there to obtain best
results for your situation. Some database administrators will want to
supplement or replace the daemon's activities with manually-managed
@ -87,7 +87,7 @@
<para>
<productname>PostgreSQL</productname>'s
<xref linkend="sql-vacuum"> command has to
<xref linkend="sql-vacuum"/> command has to
process each table on a regular basis for several reasons:
<orderedlist>
@ -140,7 +140,7 @@
traffic, which can cause poor performance for other active sessions.
There are configuration parameters that can be adjusted to reduce the
performance impact of background vacuuming &mdash; see
<xref linkend="runtime-config-resource-vacuum-cost">.
<xref linkend="runtime-config-resource-vacuum-cost"/>.
</para>
</sect2>
@ -156,7 +156,7 @@
<command>UPDATE</command> or <command>DELETE</command> of a row does not
immediately remove the old version of the row.
This approach is necessary to gain the benefits of multiversion
concurrency control (<acronym>MVCC</acronym>, see <xref linkend="mvcc">): the row version
concurrency control (<acronym>MVCC</acronym>, see <xref linkend="mvcc"/>): the row version
must not be deleted while it is still potentially visible to other
transactions. But eventually, an outdated or deleted row version is no
longer of interest to any transaction. The space it occupies must then be
@ -217,7 +217,7 @@
their busiest tables as often as once every few minutes.) If you have
multiple databases in a cluster, don't forget to
<command>VACUUM</command> each one; the program <xref
linkend="app-vacuumdb"> might be helpful.
linkend="app-vacuumdb"/> might be helpful.
</para>
<tip>
@ -227,9 +227,9 @@
massive update or delete activity. If you have such a table and
you need to reclaim the excess disk space it occupies, you will need
to use <command>VACUUM FULL</command>, or alternatively
<xref linkend="sql-cluster">
<xref linkend="sql-cluster"/>
or one of the table-rewriting variants of
<xref linkend="sql-altertable">.
<xref linkend="sql-altertable"/>.
These commands rewrite an entire new copy of the table and build
new indexes for it. All these options require exclusive lock. Note that
they also temporarily use extra disk space approximately equal to the size
@ -242,7 +242,7 @@
<para>
If you have a table whose entire contents are deleted on a periodic
basis, consider doing it with
<xref linkend="sql-truncate"> rather
<xref linkend="sql-truncate"/> rather
than using <command>DELETE</command> followed by
<command>VACUUM</command>. <command>TRUNCATE</command> removes the
entire content of the table immediately, without requiring a
@ -269,7 +269,7 @@
The <productname>PostgreSQL</productname> query planner relies on
statistical information about the contents of tables in order to
generate good plans for queries. These statistics are gathered by
the <xref linkend="sql-analyze"> command,
the <xref linkend="sql-analyze"/> command,
which can be invoked by itself or
as an optional step in <command>VACUUM</command>. It is important to have
reasonably accurate statistics, otherwise poor choices of plans might
@ -323,7 +323,7 @@
clauses and have highly irregular data distributions might require a
finer-grain data histogram than other columns. See <command>ALTER TABLE
SET STATISTICS</command>, or change the database-wide default using the <xref
linkend="guc-default-statistics-target"> configuration parameter.
linkend="guc-default-statistics-target"/> configuration parameter.
</para>
<para>
@ -453,7 +453,7 @@
</note>
<para>
<xref linkend="guc-vacuum-freeze-min-age">
<xref linkend="guc-vacuum-freeze-min-age"/>
controls how old an XID value has to be before rows bearing that XID will be
frozen. Increasing this setting may avoid unnecessary work if the
rows that would otherwise be frozen will soon be modified again,
@ -471,7 +471,7 @@
Periodically, <command>VACUUM</command> will perform an <firstterm>aggressive
vacuum</firstterm>, skipping only those pages which contain neither dead rows nor
any unfrozen XID or MXID values.
<xref linkend="guc-vacuum-freeze-table-age">
<xref linkend="guc-vacuum-freeze-table-age"/>
controls when <command>VACUUM</command> does that: all-visible but not all-frozen
pages are scanned if the number of transactions that have passed since the
last such scan is greater than <varname>vacuum_freeze_table_age</varname> minus
@ -488,7 +488,7 @@
that, data loss could result. To ensure that this does not happen,
autovacuum is invoked on any table that might contain unfrozen rows with
XIDs older than the age specified by the configuration parameter <xref
linkend="guc-autovacuum-freeze-max-age">. (This will happen even if
linkend="guc-autovacuum-freeze-max-age"/>. (This will happen even if
autovacuum is disabled.)
</para>
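<para>
One way to keep an eye on how close tables are getting to that limit is to
compare each table's <structfield>relfrozenxid</structfield> age with the
configured maximum, for example:
<programlisting>
-- Tables carrying the oldest unfrozen XIDs, alongside the configured ceiling.
SELECT c.relname,
       age(c.relfrozenxid) AS xid_age,
       current_setting('autovacuum_freeze_max_age') AS freeze_max_age
FROM pg_class c
WHERE c.relkind = 'r'
ORDER BY age(c.relfrozenxid) DESC
LIMIT 10;
</programlisting>
</para>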
@ -636,7 +636,7 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
execute commands once it has gone into the safety shutdown mode,
the only way to do this is to stop the server and start the server in single-user
mode to execute <command>VACUUM</command>. The shutdown mode is not enforced
in single-user mode. See the <xref linkend="app-postgres"> reference
in single-user mode. See the <xref linkend="app-postgres"/> reference
page for details about using single-user mode.
</para>
@ -673,13 +673,13 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
<para>
Whenever <command>VACUUM</command> scans any part of a table, it will replace
any multixact ID it encounters which is older than
<xref linkend="guc-vacuum-multixact-freeze-min-age">
<xref linkend="guc-vacuum-multixact-freeze-min-age"/>
by a different value, which can be the zero value, a single
transaction ID, or a newer multixact ID. For each table,
<structname>pg_class</structname>.<structfield>relminmxid</structfield> stores the oldest
possible multixact ID still appearing in any tuple of that table.
If this value is older than
<xref linkend="guc-vacuum-multixact-freeze-table-age">, an aggressive
<xref linkend="guc-vacuum-multixact-freeze-table-age"/>, an aggressive
vacuum is forced. As discussed in the previous section, an aggressive
vacuum means that only those pages which are known to be all-frozen will
be skipped. <function>mxid_age()</function> can be used on
@ -697,7 +697,7 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
<para>
As a safety device, an aggressive vacuum scan will occur for any table
whose multixact-age is greater than
<xref linkend="guc-autovacuum-multixact-freeze-max-age">. Aggressive
<xref linkend="guc-autovacuum-multixact-freeze-max-age"/>. Aggressive
vacuum scans will also occur progressively for all tables, starting with
those that have the oldest multixact-age, if the amount of used member
storage space exceeds 50% of the addressable storage space.
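<para>
A similar check can be made for multixact age, using the
<function>mxid_age()</function> function mentioned above, for example:
<programlisting>
-- Tables with the oldest multixact IDs still present in their tuples.
SELECT relname, mxid_age(relminmxid) AS mxid_age
FROM pg_class
WHERE relkind = 'r'
ORDER BY mxid_age(relminmxid) DESC
LIMIT 10;
</programlisting>
</para>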
@ -723,7 +723,7 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
tables that have had a large number of inserted, updated or deleted
tuples. These checks use the statistics collection facility;
therefore, autovacuum cannot be used unless <xref
linkend="guc-track-counts"> is set to <literal>true</literal>.
linkend="guc-track-counts"/> is set to <literal>true</literal>.
In the default configuration, autovacuuming is enabled and the related
configuration parameters are appropriately set.
</para>
@ -734,17 +734,17 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
<firstterm>autovacuum launcher</firstterm>, which is in charge of starting
<firstterm>autovacuum worker</firstterm> processes for all databases. The
launcher will distribute the work across time, attempting to start one
worker within each database every <xref linkend="guc-autovacuum-naptime">
worker within each database every <xref linkend="guc-autovacuum-naptime"/>
seconds. (Therefore, if the installation has <replaceable>N</replaceable> databases,
a new worker will be launched every
<varname>autovacuum_naptime</varname>/<replaceable>N</replaceable> seconds.)
A maximum of <xref linkend="guc-autovacuum-max-workers"> worker processes
A maximum of <xref linkend="guc-autovacuum-max-workers"/> worker processes
are allowed to run at the same time. If there are more than
<varname>autovacuum_max_workers</varname> databases to be processed,
the next database will be processed as soon as the first worker finishes.
Each worker process will check each table within its database and
execute <command>VACUUM</command> and/or <command>ANALYZE</command> as needed.
<xref linkend="guc-log-autovacuum-min-duration"> can be set to monitor
<xref linkend="guc-log-autovacuum-min-duration"/> can be set to monitor
autovacuum workers' activity.
</para>
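<para>
For example, to log every autovacuum action (rather than only the slow ones)
without restarting the server:
<programlisting>
-- 0 logs all autovacuum runs; a value in milliseconds logs only slower ones.
ALTER SYSTEM SET log_autovacuum_min_duration = 0;
SELECT pg_reload_conf();
</programlisting>
</para>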
@ -756,13 +756,13 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
available. There is no limit on how many workers might be in a
single database, but workers do try to avoid repeating work that has
already been done by other workers. Note that the number of running
workers does not count towards <xref linkend="guc-max-connections"> or
<xref linkend="guc-superuser-reserved-connections"> limits.
workers does not count towards <xref linkend="guc-max-connections"/> or
<xref linkend="guc-superuser-reserved-connections"/> limits.
</para>
<para>
Tables whose <structfield>relfrozenxid</structfield> value is more than
<xref linkend="guc-autovacuum-freeze-max-age"> transactions old are always
<xref linkend="guc-autovacuum-freeze-max-age"/> transactions old are always
vacuumed (this also applies to those tables whose freeze max age has
been modified via storage parameters; see below). Otherwise, if the
number of tuples obsoleted since the last
@ -772,9 +772,9 @@ HINT: Stop the postmaster and vacuum that database in single-user mode.
vacuum threshold = vacuum base threshold + vacuum scale factor * number of tuples
</programlisting>
where the vacuum base threshold is
<xref linkend="guc-autovacuum-vacuum-threshold">,
<xref linkend="guc-autovacuum-vacuum-threshold"/>,
the vacuum scale factor is
<xref linkend="guc-autovacuum-vacuum-scale-factor">,
<xref linkend="guc-autovacuum-vacuum-scale-factor"/>,
and the number of tuples is
<structname>pg_class</structname>.<structfield>reltuples</structfield>.
The number of obsolete tuples is obtained from the statistics
@ -808,16 +808,16 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu
<filename>postgresql.conf</filename>, but it is possible to override them
(and many other autovacuum control parameters) on a per-table basis; see
<xref linkend="sql-createtable-storage-parameters"
endterm="sql-createtable-storage-parameters-title"> for more information.
endterm="sql-createtable-storage-parameters-title"/> for more information.
If a setting has been changed via a table's storage parameters, that value
is used when processing that table; otherwise the global settings are
used. See <xref linkend="runtime-config-autovacuum"> for more details on
used. See <xref linkend="runtime-config-autovacuum"/> for more details on
the global settings.
</para>
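<para>
As a sketch of how the per-table and global settings interact, a large,
frequently updated table (the name is a placeholder) might be given a lower
scale factor than the stock defaults of 50 and 0.2:
<programlisting>
-- With reltuples = 10,000,000, the vacuum threshold becomes
-- 1000 + 0.01 * 10,000,000 = 101,000 obsolete tuples,
-- instead of 50 + 0.2 * 10,000,000 = 2,000,050 with the defaults.
ALTER TABLE busy_table SET (
    autovacuum_vacuum_threshold = 1000,
    autovacuum_vacuum_scale_factor = 0.01
);
</programlisting>
</para>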
<para>
When multiple workers are running, the autovacuum cost delay parameters
(see <xref linkend="runtime-config-resource-vacuum-cost">) are
(see <xref linkend="runtime-config-resource-vacuum-cost"/>) are
<quote>balanced</quote> among all the running workers, so that the
total I/O impact on the system is the same regardless of the number
of workers actually running. However, any workers processing tables whose
@ -838,7 +838,7 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu
<para>
In some situations it is worthwhile to rebuild indexes periodically
with the <xref linkend="sql-reindex"> command or a series of individual
with the <xref linkend="sql-reindex"/> command or a series of individual
rebuilding steps.
</para>
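<para>
A minimal sketch of both approaches, with placeholder object names (the
lock-friendlier multistep variant is discussed in the rest of this section):
<programlisting>
-- Simple, but requires an exclusive table lock:
REINDEX INDEX my_index;

-- Multistep rebuild that avoids the long exclusive table lock:
CREATE INDEX CONCURRENTLY my_index_new ON my_table (my_column);
DROP INDEX CONCURRENTLY my_index;
ALTER INDEX my_index_new RENAME TO my_index;
</programlisting>
</para>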
@ -868,16 +868,16 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu
</para>
<para>
<xref linkend="sql-reindex"> can be used safely and easily in all cases.
<xref linkend="sql-reindex"/> can be used safely and easily in all cases.
But since the command requires an exclusive table lock, it is
often preferable to execute an index rebuild with a sequence of
creation and replacement steps. Index types that support
<xref linkend="sql-createindex"> with the <literal>CONCURRENTLY</literal>
<xref linkend="sql-createindex"/> with the <literal>CONCURRENTLY</literal>
option can instead be recreated that way. If that is successful and the
resulting index is valid, the original index can then be replaced by
the newly built one using a combination of <xref linkend="sql-alterindex">
and <xref linkend="sql-dropindex">. When an index is used to enforce
uniqueness or other constraints, <xref linkend="sql-altertable"> might
the newly built one using a combination of <xref linkend="sql-alterindex"/>
and <xref linkend="sql-dropindex"/>. When an index is used to enforce
uniqueness or other constraints, <xref linkend="sql-altertable"/> might
be necessary to swap the existing constraint with one enforced by
the new index. Review this alternate multistep rebuild approach
carefully before using it as there are limitations on which
@ -922,7 +922,7 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu
setting the configuration parameter <varname>logging_collector</varname> to
<literal>true</literal> in <filename>postgresql.conf</filename>. The control
parameters for this program are described in <xref
linkend="runtime-config-logging-where">. You can also use this approach
linkend="runtime-config-logging-where"/>. You can also use this approach
to capture the log data in machine readable <acronym>CSV</acronym>
(comma-separated values) format.
</para>

View File

@ -49,20 +49,20 @@
resources, they should be put in the same database but possibly
into separate schemas. Schemas are a purely logical structure and who can
access what is managed by the privilege system. More information about
managing schemas is in <xref linkend="ddl-schemas">.
managing schemas is in <xref linkend="ddl-schemas"/>.
</para>
<para>
Databases are created with the <command>CREATE DATABASE</command> command
(see <xref linkend="manage-ag-createdb">) and destroyed with the
(see <xref linkend="manage-ag-createdb"/>) and destroyed with the
<command>DROP DATABASE</command> command
(see <xref linkend="manage-ag-dropdb">).
(see <xref linkend="manage-ag-dropdb"/>).
To determine the set of existing databases, examine the
<structname>pg_database</structname> system catalog, for example
<synopsis>
SELECT datname FROM pg_database;
</synopsis>
The <xref linkend="app-psql"> program's <literal>\l</literal> meta-command
The <xref linkend="app-psql"/> program's <literal>\l</literal> meta-command
and <option>-l</option> command-line option are also useful for listing the
existing databases.
</para>
@ -83,12 +83,12 @@ SELECT datname FROM pg_database;
<para>
In order to create a database, the <productname>PostgreSQL</productname>
server must be up and running (see <xref
linkend="server-start">).
linkend="server-start"/>).
</para>
<para>
Databases are created with the SQL command
<xref linkend="sql-createdatabase">:
<xref linkend="sql-createdatabase"/>:
<synopsis>
CREATE DATABASE <replaceable>name</replaceable>;
</synopsis>
@ -101,7 +101,7 @@ CREATE DATABASE <replaceable>name</replaceable>;
<para>
The creation of databases is a restricted operation. See <xref
linkend="role-attributes"> for how to grant permission.
linkend="role-attributes"/> for how to grant permission.
</para>
<para>
@ -110,7 +110,7 @@ CREATE DATABASE <replaceable>name</replaceable>;
question remains how the <emphasis>first</emphasis> database at any given
site can be created. The first database is always created by the
<command>initdb</command> command when the data storage area is
initialized. (See <xref linkend="creating-cluster">.) This
initialized. (See <xref linkend="creating-cluster"/>.) This
database is called
<literal>postgres</literal>.<indexterm><primary>postgres</primary></indexterm> So to
create the first <quote>ordinary</quote> database you can connect to
@ -127,7 +127,7 @@ CREATE DATABASE <replaceable>name</replaceable>;
propagated to all subsequently created databases. Because of this,
avoid creating objects in <literal>template1</literal> unless you want them
propagated to every newly created database. More details
appear in <xref linkend="manage-ag-templatedbs">.
appear in <xref linkend="manage-ag-templatedbs"/>.
</para>
<para>
@ -142,14 +142,14 @@ createdb <replaceable class="parameter">dbname</replaceable>
<command>createdb</command> does no magic. It connects to the <literal>postgres</literal>
database and issues the <command>CREATE DATABASE</command> command,
exactly as described above.
The <xref linkend="app-createdb"> reference page contains the invocation
The <xref linkend="app-createdb"/> reference page contains the invocation
details. Note that <command>createdb</command> without any arguments will create
a database with the current user name.
</para>
<note>
<para>
<xref linkend="client-authentication"> contains information about
<xref linkend="client-authentication"/> contains information about
how to restrict who can connect to a given database.
</para>
</note>
@ -283,7 +283,7 @@ createdb -T template0 <replaceable>dbname</replaceable>
<title>Database Configuration</title>
<para>
Recall from <xref linkend="runtime-config"> that the
Recall from <xref linkend="runtime-config"/> that the
<productname>PostgreSQL</productname> server provides a large number of
run-time configuration variables. You can set database-specific
default values for many of these settings.
@ -315,7 +315,7 @@ ALTER DATABASE mydb SET geqo TO off;
<para>
Databases are destroyed with the command
<xref linkend="sql-dropdatabase">:<indexterm><primary>DROP DATABASE</primary></indexterm>
<xref linkend="sql-dropdatabase"/>:<indexterm><primary>DROP DATABASE</primary></indexterm>
<synopsis>
DROP DATABASE <replaceable>name</replaceable>;
</synopsis>
@ -337,7 +337,7 @@ DROP DATABASE <replaceable>name</replaceable>;
<para>
For convenience, there is also a shell program to drop
databases, <xref linkend="app-dropdb">:<indexterm><primary>dropdb</primary></indexterm>
databases, <xref linkend="app-dropdb"/>:<indexterm><primary>dropdb</primary></indexterm>
<synopsis>
dropdb <replaceable class="parameter">dbname</replaceable>
</synopsis>
@ -396,7 +396,7 @@ dropdb <replaceable class="parameter">dbname</replaceable>
<para>
To define a tablespace, use the <xref
linkend="sql-createtablespace">
linkend="sql-createtablespace"/>
command, for example:<indexterm><primary>CREATE TABLESPACE</primary></indexterm>:
<programlisting>
CREATE TABLESPACE fastspace LOCATION '/ssd1/postgresql/data';
@ -438,7 +438,7 @@ CREATE TABLE foo(i int) TABLESPACE space1;
</para>
<para>
Alternatively, use the <xref linkend="guc-default-tablespace"> parameter:
Alternatively, use the <xref linkend="guc-default-tablespace"/> parameter:
<programlisting>
SET default_tablespace = space1;
CREATE TABLE foo(i int);
@ -450,7 +450,7 @@ CREATE TABLE foo(i int);
</para>
<para>
There is also a <xref linkend="guc-temp-tablespaces"> parameter, which
There is also a <xref linkend="guc-temp-tablespaces"/> parameter, which
determines the placement of temporary tables and indexes, as well as
temporary files that are used for purposes such as sorting large data
sets. This can be a list of tablespace names, rather than only one,
@ -490,7 +490,7 @@ CREATE TABLE foo(i int);
<para>
To remove an empty tablespace, use the <xref
linkend="sql-droptablespace">
linkend="sql-droptablespace"/>
command.
</para>
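<para>
For example, once nothing refers to the tablespace created above, it can be
dropped:
<programlisting>
-- Fails if any objects still reside in the tablespace.
DROP TABLESPACE fastspace;
</programlisting>
</para>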
@ -501,7 +501,7 @@ CREATE TABLE foo(i int);
<synopsis>
SELECT spcname FROM pg_tablespace;
</synopsis>
The <xref linkend="app-psql"> program's <literal>\db</literal> meta-command
The <xref linkend="app-psql"/> program's <literal>\db</literal> meta-command
is also useful for listing the existing tablespaces.
</para>

View File

@ -27,8 +27,8 @@
<command>ps</command>, <command>top</command>, <command>iostat</command>, and <command>vmstat</command>.
Also, once one has identified a
poorly-performing query, further investigation might be needed using
<productname>PostgreSQL</productname>'s <xref linkend="sql-explain"> command.
<xref linkend="using-explain"> discusses <command>EXPLAIN</command>
<productname>PostgreSQL</productname>'s <xref linkend="sql-explain"/> command.
<xref linkend="using-explain"/> discusses <command>EXPLAIN</command>
and other methods for understanding the behavior of an individual
query.
</para>
@ -92,7 +92,7 @@ postgres: <replaceable>user</replaceable> <replaceable>database</replaceable> <r
</para>
<para>
If <xref linkend="guc-cluster-name"> has been configured the
If <xref linkend="guc-cluster-name"/> has been configured the
cluster name will also be shown in <command>ps</command> output:
<screen>
$ psql -c 'SHOW cluster_name'
@ -108,7 +108,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
</para>
<para>
If you have turned off <xref linkend="guc-update-process-title"> then the
If you have turned off <xref linkend="guc-update-process-title"/> then the
activity indicator is not updated; the process title is set only once
when a new process is launched. On some platforms this saves a measurable
amount of per-command overhead; on others it's insignificant.
@ -161,27 +161,27 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
Since collection of statistics adds some overhead to query execution,
the system can be configured to collect or not collect information.
This is controlled by configuration parameters that are normally set in
<filename>postgresql.conf</filename>. (See <xref linkend="runtime-config"> for
<filename>postgresql.conf</filename>. (See <xref linkend="runtime-config"/> for
details about setting configuration parameters.)
</para>
<para>
The parameter <xref linkend="guc-track-activities"> enables monitoring
The parameter <xref linkend="guc-track-activities"/> enables monitoring
of the current command being executed by any server process.
</para>
<para>
The parameter <xref linkend="guc-track-counts"> controls whether
The parameter <xref linkend="guc-track-counts"/> controls whether
statistics are collected about table and index accesses.
</para>
<para>
The parameter <xref linkend="guc-track-functions"> enables tracking of
The parameter <xref linkend="guc-track-functions"/> enables tracking of
usage of user-defined functions.
</para>
<para>
The parameter <xref linkend="guc-track-io-timing"> enables monitoring
The parameter <xref linkend="guc-track-io-timing"/> enables monitoring
of block read and write times.
</para>
@ -189,7 +189,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
Normally these parameters are set in <filename>postgresql.conf</filename> so
that they apply to all server processes, but it is possible to turn
them on or off in individual sessions using the <xref
linkend="sql-set"> command. (To prevent
linkend="sql-set"/> command. (To prevent
ordinary users from hiding their activity from the administrator,
only superusers are allowed to change these parameters with
<command>SET</command>.)
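<para>
For instance, a superuser investigating I/O behavior could enable timing just
for the current session, or make it the server-wide default:
<programlisting>
-- Session-local; requires superuser.
SET track_io_timing = on;

-- Cluster-wide, without hand-editing postgresql.conf.
ALTER SYSTEM SET track_io_timing = on;
SELECT pg_reload_conf();
</programlisting>
</para>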
@ -199,7 +199,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
The statistics collector transmits the collected information to other
<productname>PostgreSQL</productname> processes through temporary files.
These files are stored in the directory named by the
<xref linkend="guc-stats-temp-directory"> parameter,
<xref linkend="guc-stats-temp-directory"/> parameter,
<filename>pg_stat_tmp</filename> by default.
For better performance, <varname>stats_temp_directory</varname> can be
pointed at a RAM-based file system, decreasing physical I/O requirements.
@ -217,13 +217,13 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<para>
Several predefined views, listed in <xref
linkend="monitoring-stats-dynamic-views-table">, are available to show
linkend="monitoring-stats-dynamic-views-table"/>, are available to show
the current state of the system. There are also several other
views, listed in <xref
linkend="monitoring-stats-views-table">, available to show the results
linkend="monitoring-stats-views-table"/>, available to show the results
of statistics collection. Alternatively, one can
build custom views using the underlying statistics functions, as discussed
in <xref linkend="monitoring-stats-functions">.
in <xref linkend="monitoring-stats-functions"/>.
</para>
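<para>
As a quick illustration, the most commonly consulted of these views can be
queried directly, for example:
<programlisting>
-- What every server process is doing right now.
SELECT pid, usename, state, wait_event_type, wait_event, query
FROM pg_stat_activity;

-- Accumulated per-database activity counters.
SELECT datname, xact_commit, xact_rollback, blks_read, blks_hit
FROM pg_stat_database;
</programlisting>
</para>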
<para>
@ -288,7 +288,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row per server process, showing information related to
the current activity of that process, such as state and current query.
See <xref linkend="pg-stat-activity-view"> for details.
See <xref linkend="pg-stat-activity-view"/> for details.
</entry>
</row>
@ -296,7 +296,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structname>pg_stat_replication</structname><indexterm><primary>pg_stat_replication</primary></indexterm></entry>
<entry>One row per WAL sender process, showing statistics about
replication to that sender's connected standby server.
See <xref linkend="pg-stat-replication-view"> for details.
See <xref linkend="pg-stat-replication-view"/> for details.
</entry>
</row>
@ -304,7 +304,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structname>pg_stat_wal_receiver</structname><indexterm><primary>pg_stat_wal_receiver</primary></indexterm></entry>
<entry>Only one row, showing statistics about the WAL receiver from
that receiver's connected server.
See <xref linkend="pg-stat-wal-receiver-view"> for details.
See <xref linkend="pg-stat-wal-receiver-view"/> for details.
</entry>
</row>
@ -312,7 +312,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structname>pg_stat_subscription</structname><indexterm><primary>pg_stat_subscription</primary></indexterm></entry>
<entry>At least one row per subscription, showing information about
the subscription workers.
See <xref linkend="pg-stat-subscription"> for details.
See <xref linkend="pg-stat-subscription"/> for details.
</entry>
</row>
@ -320,7 +320,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structname>pg_stat_ssl</structname><indexterm><primary>pg_stat_ssl</primary></indexterm></entry>
<entry>One row per connection (regular and replication), showing information about
SSL used on this connection.
See <xref linkend="pg-stat-ssl-view"> for details.
See <xref linkend="pg-stat-ssl-view"/> for details.
</entry>
</row>
@ -328,7 +328,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structname>pg_stat_progress_vacuum</structname><indexterm><primary>pg_stat_progress_vacuum</primary></indexterm></entry>
<entry>One row for each backend (including autovacuum worker processes) running
<command>VACUUM</command>, showing current progress.
See <xref linkend='vacuum-progress-reporting'>.
See <xref linkend='vacuum-progress-reporting'/>.
</entry>
</row>
@ -352,7 +352,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structname>pg_stat_archiver</structname><indexterm><primary>pg_stat_archiver</primary></indexterm></entry>
<entry>One row only, showing statistics about the
WAL archiver process's activity. See
<xref linkend="pg-stat-archiver-view"> for details.
<xref linkend="pg-stat-archiver-view"/> for details.
</entry>
</row>
@ -360,14 +360,14 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structname>pg_stat_bgwriter</structname><indexterm><primary>pg_stat_bgwriter</primary></indexterm></entry>
<entry>One row only, showing statistics about the
background writer process's activity. See
<xref linkend="pg-stat-bgwriter-view"> for details.
<xref linkend="pg-stat-bgwriter-view"/> for details.
</entry>
</row>
<row>
<entry><structname>pg_stat_database</structname><indexterm><primary>pg_stat_database</primary></indexterm></entry>
<entry>One row per database, showing database-wide statistics. See
<xref linkend="pg-stat-database-view"> for details.
<xref linkend="pg-stat-database-view"/> for details.
</entry>
</row>
@ -376,7 +376,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row per database, showing database-wide statistics about
query cancels due to conflict with recovery on standby servers.
See <xref linkend="pg-stat-database-conflicts-view"> for details.
See <xref linkend="pg-stat-database-conflicts-view"/> for details.
</entry>
</row>
@ -385,7 +385,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row for each table in the current database, showing statistics
about accesses to that specific table.
See <xref linkend="pg-stat-all-tables-view"> for details.
See <xref linkend="pg-stat-all-tables-view"/> for details.
</entry>
</row>
@ -427,7 +427,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row for each index in the current database, showing statistics
about accesses to that specific index.
See <xref linkend="pg-stat-all-indexes-view"> for details.
See <xref linkend="pg-stat-all-indexes-view"/> for details.
</entry>
</row>
@ -448,7 +448,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row for each table in the current database, showing statistics
about I/O on that specific table.
See <xref linkend="pg-statio-all-tables-view"> for details.
See <xref linkend="pg-statio-all-tables-view"/> for details.
</entry>
</row>
@ -469,7 +469,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row for each index in the current database,
showing statistics about I/O on that specific index.
See <xref linkend="pg-statio-all-indexes-view"> for details.
See <xref linkend="pg-statio-all-indexes-view"/> for details.
</entry>
</row>
@ -490,7 +490,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row for each sequence in the current database,
showing statistics about I/O on that specific sequence.
See <xref linkend="pg-statio-all-sequences-view"> for details.
See <xref linkend="pg-statio-all-sequences-view"/> for details.
</entry>
</row>
@ -512,7 +512,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>
One row for each tracked function, showing statistics
about executions of that function. See
<xref linkend="pg-stat-user-functions-view"> for details.
<xref linkend="pg-stat-user-functions-view"/> for details.
</entry>
</row>
@ -609,7 +609,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry>Host name of the connected client, as reported by a
reverse DNS lookup of <structfield>client_addr</structfield>. This field will
only be non-null for IP connections, and only when <xref
linkend="guc-log-hostname"> is enabled.
linkend="guc-log-hostname"/> is enabled.
</entry>
</row>
<row>
@ -731,7 +731,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<entry><structfield>wait_event</structfield></entry>
<entry><type>text</type></entry>
<entry>Wait event name if backend is currently waiting, otherwise NULL.
See <xref linkend="wait-event-table"> for details.
See <xref linkend="wait-event-table"/> for details.
</entry>
</row>
<row>
@ -772,7 +772,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
<listitem>
<para>
<literal>disabled</literal>: This state is reported if <xref
linkend="guc-track-activities"> is disabled in this backend.
linkend="guc-track-activities"/> is disabled in this backend.
</para>
</listitem>
</itemizedlist>
@ -796,7 +796,7 @@ postgres 27093 0.0 0.0 30096 2752 ? Ss 11:34 0:00 postgres: ser
currently executing query. In all other states, it shows the last query
that was executed. By default the query text is truncated at 1024
characters; this value can be changed via the parameter
<xref linkend="guc-track-activity-query-size">.
<xref linkend="guc-track-activity-query-size"/>.
</entry>
</row>
<row>
@ -1683,7 +1683,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<entry>Host name of the connected client, as reported by a
reverse DNS lookup of <structfield>client_addr</structfield>. This field will
only be non-null for IP connections, and only when <xref
linkend="guc-log-hostname"> is enabled.
linkend="guc-log-hostname"/> is enabled.
</entry>
</row>
<row>
@ -1704,7 +1704,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<entry><structfield>backend_xmin</structfield></entry>
<entry><type>xid</type></entry>
<entry>This standby's <literal>xmin</literal> horizon reported
by <xref linkend="guc-hot-standby-feedback">.</entry>
by <xref linkend="guc-hot-standby-feedback"/>.</entry>
</row>
<row>
<entry><structfield>state</structfield></entry>
@ -2347,7 +2347,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<entry><type>bigint</type></entry>
<entry>Number of queries canceled due to conflicts with recovery
in this database. (Conflicts occur only on standby servers; see
<xref linkend="pg-stat-database-conflicts-view"> for details.)
<xref linkend="pg-stat-database-conflicts-view"/> for details.)
</entry>
</row>
<row>
@ -2356,7 +2356,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<entry>Number of temporary files created by queries in this database.
All temporary files are counted, regardless of why the temporary file
was created (e.g., sorting or hashing), and regardless of the
<xref linkend="guc-log-temp-files"> setting.
<xref linkend="guc-log-temp-files"/> setting.
</entry>
</row>
<row>
@ -2365,7 +2365,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<entry>Total amount of data written to temporary files by queries in
this database. All temporary files are counted, regardless of why
the temporary file was created, and
regardless of the <xref linkend="guc-log-temp-files"> setting.
regardless of the <xref linkend="guc-log-temp-files"/> setting.
</entry>
</row>
<row>
@ -2942,7 +2942,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<para>
The <structname>pg_stat_user_functions</structname> view will contain
one row for each tracked function, showing statistics about executions of
that function. The <xref linkend="guc-track-functions"> parameter
that function. The <xref linkend="guc-track-functions"/> parameter
controls exactly which functions are tracked.
</para>
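<para>
With <varname>track_functions</varname> enabled, the view can be queried like
any other, for example:
<programlisting>
-- Requires track_functions = pl (or all); times are in milliseconds.
SELECT funcname, calls, total_time, self_time
FROM pg_stat_user_functions
ORDER BY total_time DESC;
</programlisting>
</para>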
@ -2967,7 +2967,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
<para>
Additional functions related to statistics collection are listed in <xref
linkend="monitoring-stats-funcs-table">.
linkend="monitoring-stats-funcs-table"/>.
</para>
<table id="monitoring-stats-funcs-table">
@ -3074,7 +3074,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i
Sometimes it may be more convenient to obtain just a subset of this
information. In such cases, an older set of per-backend statistics
access functions can be used; these are shown in <xref
linkend="monitoring-stats-backend-funcs-table">.
linkend="monitoring-stats-backend-funcs-table"/>.
These access functions use a backend ID number, which ranges from one
to the number of currently active backends.
The function <function>pg_stat_get_backend_idset</function> provides a
@ -3162,7 +3162,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<entry><literal><function>pg_stat_get_backend_wait_event_type(integer)</function></literal></entry>
<entry><type>text</type></entry>
<entry>Wait event type name if backend is currently waiting, otherwise NULL.
See <xref linkend="wait-event-table"> for details.
See <xref linkend="wait-event-table"/> for details.
</entry>
</row>
@ -3170,7 +3170,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<entry><literal><function>pg_stat_get_backend_wait_event(integer)</function></literal></entry>
<entry><type>text</type></entry>
<entry>Wait event name if backend is currently waiting, otherwise NULL.
See <xref linkend="wait-event-table"> for details.
See <xref linkend="wait-event-table"/> for details.
</entry>
</row>
@ -3230,9 +3230,9 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
</itemizedlist>
Details of the <structname>pg_locks</structname> view appear in
<xref linkend="view-pg-locks">.
<xref linkend="view-pg-locks"/>.
For more information on locking and managing concurrency with
<productname>PostgreSQL</productname>, refer to <xref linkend="mvcc">.
<productname>PostgreSQL</productname>, refer to <xref linkend="mvcc"/>.
</para>
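<para>
A minimal example of spotting lock requests that are currently waiting and
the relations involved:
<programlisting>
-- Ungranted lock requests; relation is NULL for non-relation lock types.
SELECT locktype, relation::regclass, mode, pid, granted
FROM pg_locks
WHERE NOT granted;
</programlisting>
</para>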
</sect1>
@ -3296,7 +3296,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<entry><structfield>phase</structfield></entry>
<entry><type>text</type></entry>
<entry>
Current processing phase of vacuum. See <xref linkend='vacuum-phases'>.
Current processing phase of vacuum. See <xref linkend='vacuum-phases'/>.
</entry>
</row>
<row>
@ -3343,7 +3343,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<entry>
Number of dead tuples that we can store before needing to perform
an index vacuum cycle, based on
<xref linkend="guc-maintenance-work-mem">.
<xref linkend="guc-maintenance-work-mem"/>.
</entry>
</row>
<row>
@ -3390,7 +3390,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<command>VACUUM</command> is currently vacuuming the indexes. If a table has
any indexes, this will happen at least once per vacuum, after the heap
has been completely scanned. It may happen multiple times per vacuum
if <xref linkend="guc-maintenance-work-mem"> is insufficient to
if <xref linkend="guc-maintenance-work-mem"/> is insufficient to
store the number of dead tuples found.
</entry>
</row>
@ -3478,7 +3478,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
explicitly tell the configure script to make the probes available
in <productname>PostgreSQL</productname>. To include DTrace support
specify <option>--enable-dtrace</option> to configure. See <xref
linkend="install-procedure"> for further information.
linkend="install-procedure"/> for further information.
</para>
</sect2>
@ -3487,8 +3487,8 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<para>
A number of standard probes are provided in the source code,
as shown in <xref linkend="dtrace-probe-point-table">;
<xref linkend="typedefs-table">
as shown in <xref linkend="dtrace-probe-point-table"/>;
<xref linkend="typedefs-table"/>
shows the types used in the probes. More probes can certainly be
added to enhance <productname>PostgreSQL</productname>'s observability.
</para>
@ -3752,7 +3752,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<entry><literal>(ForkNumber, BlockNumber, Oid, Oid, Oid)</literal></entry>
<entry>Probe that fires when a server process begins to write a dirty
buffer. (If this happens often, it implies that
<xref linkend="guc-shared-buffers"> is too
<xref linkend="guc-shared-buffers"/> is too
small or the background writer control parameters need adjustment.)
arg0 and arg1 contain the fork and block numbers of the page.
arg2, arg3, and arg4 contain the tablespace, database, and relation OIDs
@ -3770,7 +3770,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid,
<entry>Probe that fires when a server process begins to write a
dirty WAL buffer because no more WAL buffer space is available.
(If this happens often, it implies that
<xref linkend="guc-wal-buffers"> is too small.)</entry>
<xref linkend="guc-wal-buffers"/> is too small.)</entry>
</row>
<row>
<entry><literal>wal-buffer-write-dirty-done</literal></entry>

View File

@ -165,7 +165,7 @@
<primary>transaction isolation level</primary>
</indexterm>
The SQL standard and PostgreSQL-implemented transaction isolation levels
are described in <xref linkend="mvcc-isolevel-table">.
are described in <xref linkend="mvcc-isolevel-table"/>.
</para>
<table tocentry="1" id="mvcc-isolevel-table">
@ -286,7 +286,7 @@
<para>
To set the transaction isolation level of a transaction, use the
command <xref linkend="sql-set-transaction">.
command <xref linkend="sql-set-transaction"/>.
</para>
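<para>
For example, to run a single transaction at a stricter level than the session
default:
<programlisting>
BEGIN;
SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
-- ... queries that need full serializability ...
COMMIT;
</programlisting>
</para>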
<important>
@ -296,8 +296,8 @@
made to a sequence (and therefore the counter of a
column declared using <type>serial</type>) are immediately visible
to all other transactions and are not rolled back if the transaction
that made the changes aborts. See <xref linkend="functions-sequence">
and <xref linkend="datatype-serial">.
that made the changes aborts. See <xref linkend="functions-sequence"/>
and <xref linkend="datatype-serial"/>.
</para>
</important>
@ -461,7 +461,7 @@ COMMIT;
even though they are not yet committed.) This is a stronger
guarantee than is required by the <acronym>SQL</acronym> standard
for this isolation level, and prevents all of the phenomena described
in <xref linkend="mvcc-isolevel-table"> except for serialization
in <xref linkend="mvcc-isolevel-table"/> except for serialization
anomalies. As mentioned above, this is
specifically allowed by the standard, which only describes the
<emphasis>minimum</emphasis> protections each isolation level must
@ -748,7 +748,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<para>
Don't leave connections dangling <quote>idle in transaction</quote>
longer than necessary. The configuration parameter
<xref linkend="guc-idle-in-transaction-session-timeout"> may be used to
<xref linkend="guc-idle-in-transaction-session-timeout"/> may be used to
automatically disconnect lingering sessions.
</para>
</listitem>
@ -765,9 +765,9 @@ ERROR: could not serialize access due to read/write dependencies among transact
locks into a single relation-level predicate lock because the predicate
lock table is short of memory, an increase in the rate of serialization
failures may occur. You can avoid this by increasing
<xref linkend="guc-max-pred-locks-per-transaction">,
<xref linkend="guc-max-pred-locks-per-relation">, and/or
<xref linkend="guc-max-pred-locks-per-page">.
<xref linkend="guc-max-pred-locks-per-transaction"/>,
<xref linkend="guc-max-pred-locks-per-relation"/>, and/or
<xref linkend="guc-max-pred-locks-per-page"/>.
</para>
</listitem>
<listitem>
@ -775,8 +775,8 @@ ERROR: could not serialize access due to read/write dependencies among transact
A sequential scan will always necessitate a relation-level predicate
lock. This can result in an increased rate of serialization failures.
It may be helpful to encourage the use of index scans by reducing
<xref linkend="guc-random-page-cost"> and/or increasing
<xref linkend="guc-cpu-tuple-cost">. Be sure to weigh any decrease
<xref linkend="guc-random-page-cost"/> and/or increasing
<xref linkend="guc-cpu-tuple-cost"/>. Be sure to weigh any decrease
in transaction rollbacks and restarts against any overall change in
query execution time.
</para>
@ -811,7 +811,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
server, use the
<link linkend="view-pg-locks"><structname>pg_locks</structname></link>
system view. For more information on monitoring the status of the lock
manager subsystem, refer to <xref linkend="monitoring">.
manager subsystem, refer to <xref linkend="monitoring"/>.
</para>
<sect2 id="locking-tables">
@ -826,14 +826,14 @@ ERROR: could not serialize access due to read/write dependencies among transact
which they are used automatically by
<productname>PostgreSQL</productname>. You can also acquire any
of these locks explicitly with the command <xref
linkend="sql-lock">.
linkend="sql-lock"/>.
Remember that all of these lock modes are table-level locks,
even if the name contains the word
<quote>row</quote>; the names of the lock modes are historical.
To some extent the names reflect the typical usage of each lock
mode &mdash; but the semantics are all the same. The only real difference
between one lock mode and another is the set of lock modes with
which each conflicts (see <xref linkend="table-lock-compatibility">).
which each conflicts (see <xref linkend="table-lock-compatibility"/>).
Two transactions cannot hold locks of conflicting
modes on the same table at the same time. (However, a transaction
never conflicts with itself. For example, it might acquire
@ -929,7 +929,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<command>CREATE STATISTICS</command> and
<command>ALTER TABLE VALIDATE</command> and other
<command>ALTER TABLE</command> variants (for full details see
<xref linkend="sql-altertable">).
<xref linkend="sql-altertable"/>).
</para>
</listitem>
</varlistentry>
@ -972,7 +972,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<para>
Acquired by <command>CREATE COLLATION</command>,
<command>CREATE TRIGGER</command>, and many forms of
<command>ALTER TABLE</command> (see <xref linkend="sql-altertable">).
<command>ALTER TABLE</command> (see <xref linkend="sql-altertable"/>).
</para>
</listitem>
</varlistentry>
@ -1053,9 +1053,9 @@ ERROR: could not serialize access due to read/write dependencies among transact
<table tocentry="1" id="table-lock-compatibility">
<title> Conflicting Lock Modes</title>
<tgroup cols="9">
<colspec colnum="2" colname="lockst">
<colspec colnum="9" colname="lockend">
<spanspec namest="lockst" nameend="lockend" spanname="lockreq">
<colspec colnum="2" colname="lockst"/>
<colspec colnum="9" colname="lockend"/>
<spanspec namest="lockst" nameend="lockend" spanname="lockreq"/>
<thead>
<row>
<entry morerows="1">Requested Lock Mode</entry>
@ -1173,7 +1173,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
In addition to table-level locks, there are row-level locks, which
are listed as below with the contexts in which they are used
automatically by <productname>PostgreSQL</productname>. See
<xref linkend="row-lock-compatibility"> for a complete table of
<xref linkend="row-lock-compatibility"/> for a complete table of
row-level lock conflicts. Note that a transaction can hold
conflicting locks on the same row, even in different subtransactions;
but other than that, two transactions can never hold conflicting locks
@ -1208,7 +1208,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<literal>SERIALIZABLE</literal> transaction,
however, an error will be thrown if a row to be locked has changed
since the transaction started. For further discussion see
<xref linkend="applevel-consistency">.
<xref linkend="applevel-consistency"/>.
</para>
<para>
The <literal>FOR UPDATE</literal> lock mode
@ -1286,9 +1286,9 @@ ERROR: could not serialize access due to read/write dependencies among transact
<table tocentry="1" id="row-lock-compatibility">
<title>Conflicting Row-level Locks</title>
<tgroup cols="5">
<colspec colnum="2" colname="lockst">
<colspec colnum="5" colname="lockend">
<spanspec namest="lockst" nameend="lockend" spanname="lockreq">
<colspec colnum="2" colname="lockst"/>
<colspec colnum="5" colname="lockend"/>
<spanspec namest="lockst" nameend="lockend" spanname="lockreq"/>
<thead>
<row>
<entry morerows="1">Requested Lock Mode</entry>
@ -1495,8 +1495,8 @@ UPDATE accounts SET balance = balance - 100.00 WHERE acctnum = 22222;
<para>
Both advisory locks and regular locks are stored in a shared memory
pool whose size is defined by the configuration variables
<xref linkend="guc-max-locks-per-transaction"> and
<xref linkend="guc-max-connections">.
<xref linkend="guc-max-locks-per-transaction"/> and
<xref linkend="guc-max-connections"/>.
Care must be taken not to exhaust this
memory or the server will be unable to grant any locks at all.
This imposes an upper limit on the number of advisory locks
@ -1529,7 +1529,7 @@ SELECT pg_advisory_lock(q.id) FROM
<para>
The functions provided to manipulate advisory locks are described in
<xref linkend="functions-advisory-locks">.
<xref linkend="functions-advisory-locks"/>.
</para>
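<para>
A minimal sketch of session-level advisory locking around an
application-defined resource identified by an arbitrary key:
<programlisting>
-- Block until the lock on key 42 is available, do the work, then release it.
SELECT pg_advisory_lock(42);
-- ... application-specific work protected by the lock ...
SELECT pg_advisory_unlock(42);

-- Non-blocking variant: returns true only if the lock was obtained.
SELECT pg_try_advisory_lock(42);
</programlisting>
</para>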
</sect2>
@ -1565,7 +1565,7 @@ SELECT pg_advisory_lock(q.id) FROM
</para>
<para>
As mentioned in <xref linkend="xact-serializable">, Serializable
As mentioned in <xref linkend="xact-serializable"/>, Serializable
transactions are just Repeatable Read transactions which add
nonblocking monitoring for dangerous patterns of read/write conflicts.
When a pattern is detected which could cause a cycle in the apparent
@ -1598,13 +1598,13 @@ SELECT pg_advisory_lock(q.id) FROM
</para>
<para>
See <xref linkend="xact-serializable"> for performance suggestions.
See <xref linkend="xact-serializable"/> for performance suggestions.
</para>
<warning>
<para>
This level of integrity protection using Serializable transactions
does not yet extend to hot standby mode (<xref linkend="hot-standby">).
does not yet extend to hot standby mode (<xref linkend="hot-standby"/>).
Because of that, those using hot standby may want to use Repeatable
Read and explicit locking on the master.
</para>
@ -1687,8 +1687,8 @@ SELECT pg_advisory_lock(q.id) FROM
<title>Caveats</title>
<para>
Some DDL commands, currently only <xref linkend="sql-truncate"> and the
table-rewriting forms of <xref linkend="sql-altertable">, are not
Some DDL commands, currently only <xref linkend="sql-truncate"/> and the
table-rewriting forms of <xref linkend="sql-altertable"/>, are not
MVCC-safe. This means that after the truncation or rewrite commits, the
table will appear empty to concurrent transactions, if they are using a
snapshot taken before the DDL command committed. This will only be an
@ -1705,7 +1705,7 @@ SELECT pg_advisory_lock(q.id) FROM
<para>
Support for the Serializable transaction isolation level has not yet
been added to Hot Standby replication targets (described in
<xref linkend="hot-standby">). The strictest isolation level currently
<xref linkend="hot-standby"/>). The strictest isolation level currently
supported in hot standby mode is Repeatable Read. While performing all
permanent database writes within Serializable transactions on the
master will ensure that all standbys will eventually reach a consistent

View File

@ -272,7 +272,7 @@ msgstr "Die Datei %2$s hat %1$u Zeichen."
open file %s</literal>) should probably not start with a
capital letter (if your language distinguishes letter case) or
end with a period (if your language uses punctuation marks).
It might help to read <xref linkend="error-style-guide">.
It might help to read <xref linkend="error-style-guide"/>.
</para>
</listitem>

View File

@ -30,7 +30,7 @@
<application>oid2name</application> is a utility program that helps administrators to
examine the file structure used by PostgreSQL. To make use of it, you need
to be familiar with the database file structure, which is described in
<xref linkend="storage">.
<xref linkend="storage"/>.
</para>
<note>

View File

@ -64,10 +64,10 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
worker processes</link> equal to the number
of workers chosen by the planner. The number of background workers that
the planner will consider using is limited to at most
<xref linkend="guc-max-parallel-workers-per-gather">. The total number
<xref linkend="guc-max-parallel-workers-per-gather"/>. The total number
of background workers that can exist at any one time is limited by both
<xref linkend="guc-max-worker-processes"> and
<xref linkend="guc-max-parallel-workers">. Therefore, it is possible for a
<xref linkend="guc-max-worker-processes"/> and
<xref linkend="guc-max-parallel-workers"/>. Therefore, it is possible for a
parallel query to run with fewer workers than planned, or even with
no workers at all. The optimal plan may depend on the number of workers
that are available, so this can result in poor query performance. If this
@ -118,7 +118,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
<itemizedlist>
<listitem>
<para>
<xref linkend="guc-max-parallel-workers-per-gather"> must be set to a
<xref linkend="guc-max-parallel-workers-per-gather"/> must be set to a
value which is greater than zero. This is a special case of the more
general principle that no more workers should be used than the number
configured via <varname>max_parallel_workers_per_gather</varname>.
@ -127,7 +127,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
<listitem>
<para>
<xref linkend="guc-dynamic-shared-memory-type"> must be set to a
<xref linkend="guc-dynamic-shared-memory-type"/> must be set to a
value other than <literal>none</literal>. Parallel query requires dynamic
shared memory in order to pass data between cooperating processes.
</para>
@ -178,7 +178,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
Most system-defined functions are <literal>PARALLEL SAFE</literal>,
but user-defined functions are marked <literal>PARALLEL
UNSAFE</literal> by default. See the discussion of
<xref linkend="parallel-safety">.
<xref linkend="parallel-safety"/>.
</para>
</listitem>
@ -215,7 +215,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
<para>
No background workers can be obtained because of the limitation that
the total number of background workers cannot exceed
<xref linkend="guc-max-worker-processes">.
<xref linkend="guc-max-worker-processes"/>.
</para>
</listitem>
@ -223,7 +223,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
<para>
No background workers can be obtained because of the limitation that
the total number of background workers launched for purposes of
parallel query cannot exceed <xref linkend="guc-max-parallel-workers">.
parallel query cannot exceed <xref linkend="guc-max-parallel-workers"/>.
</para>
</listitem>
@ -236,7 +236,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
send such a message, this can only occur when using a client that
does not rely on libpq. If this is a frequent
occurrence, it may be a good idea to set
<xref linkend="guc-max-parallel-workers-per-gather"> to zero in
<xref linkend="guc-max-parallel-workers-per-gather"/> to zero in
sessions where it is likely, so as to avoid generating query plans
that may be suboptimal when run serially.
</para>
@ -374,7 +374,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
must be <link linkend="parallel-safety">safe</link> for parallelism and must
have a combine function. If the aggregate has a transition state of type
<literal>internal</literal>, it must have serialization and deserialization
functions. See <xref linkend="sql-createaggregate"> for more details.
functions. See <xref linkend="sql-createaggregate"/> for more details.
Parallel aggregation is not supported if any aggregate function call
contains <literal>DISTINCT</literal> or <literal>ORDER BY</literal> clause and is also
not supported for ordered set aggregates or when the query involves
@ -389,15 +389,15 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
<para>
If a query that is expected to do so does not produce a parallel plan,
you can try reducing <xref linkend="guc-parallel-setup-cost"> or
<xref linkend="guc-parallel-tuple-cost">. Of course, this plan may turn
you can try reducing <xref linkend="guc-parallel-setup-cost"/> or
<xref linkend="guc-parallel-tuple-cost"/>. Of course, this plan may turn
out to be slower than the serial plan which the planner preferred, but
this will not always be the case. If you don't get a parallel
plan even with very small values of these settings (e.g. after setting
them both to zero), there may be some reason why the query planner is
unable to generate a parallel plan for your query. See
<xref linkend="when-can-parallel-query-be-used"> and
<xref linkend="parallel-safety"> for information on why this may be
<xref linkend="when-can-parallel-query-be-used"/> and
<xref linkend="parallel-safety"/> for information on why this may be
the case.
</para>
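<para>
For example, using the <structname>pgbench_accounts</structname> table from
the earlier examples, the cost parameters can be zeroed for the current
session to see whether a parallel plan is possible at all:
<programlisting>
-- Session-local experiment; do not leave these at zero in production.
SET parallel_setup_cost = 0;
SET parallel_tuple_cost = 0;
EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
</programlisting>
</para>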
@ -473,11 +473,11 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
where it could conceivably be done, we do not try, since this would be expensive
and error-prone. Instead, all user-defined functions are assumed to
be parallel unsafe unless otherwise marked. When using
<xref linkend="sql-createfunction"> or
<xref linkend="sql-alterfunction">, markings can be set by specifying
<xref linkend="sql-createfunction"/> or
<xref linkend="sql-alterfunction"/>, markings can be set by specifying
<literal>PARALLEL SAFE</literal>, <literal>PARALLEL RESTRICTED</literal>, or
<literal>PARALLEL UNSAFE</literal> as appropriate. When using
<xref linkend="sql-createaggregate">, the
<xref linkend="sql-createaggregate"/>, the
<literal>PARALLEL</literal> option can be specified with <literal>SAFE</literal>,
<literal>RESTRICTED</literal>, or <literal>UNSAFE</literal> as the corresponding value.
</para>
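<para>
For instance, a function known to be safe for parallelism can be marked
accordingly (the function shown is only a placeholder):
<programlisting>
CREATE FUNCTION add_tax(amount numeric) RETURNS numeric
    AS 'SELECT amount * 1.2' LANGUAGE SQL
    IMMUTABLE PARALLEL SAFE;

-- Existing functions can be re-marked without being redefined.
ALTER FUNCTION add_tax(numeric) PARALLEL SAFE;
</programlisting>
</para>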

View File

@ -10,15 +10,15 @@
<para>
The <filename>passwordcheck</filename> module checks users' passwords
whenever they are set with
<xref linkend="sql-createrole"> or
<xref linkend="sql-alterrole">.
<xref linkend="sql-createrole"/> or
<xref linkend="sql-alterrole"/>.
If a password is considered too weak, it will be rejected and
the command will terminate with an error.
</para>
<para>
To enable this module, add <literal>'$libdir/passwordcheck'</literal>
to <xref linkend="guc-shared-preload-libraries"> in
to <xref linkend="guc-shared-preload-libraries"/> in
<filename>postgresql.conf</filename>, then restart the server.
</para>
@ -49,7 +49,7 @@
For this reason, <filename>passwordcheck</filename> is not
recommended if your security requirements are high.
It is more secure to use an external authentication method such as GSSAPI
(see <xref linkend="client-authentication">) than to rely on
(see <xref linkend="client-authentication"/>) than to rely on
passwords within the database.
</para>
<para>

View File

@ -31,7 +31,7 @@
plan to match the query structure and the properties of the data
is absolutely critical for good performance, so the system includes
a complex <firstterm>planner</firstterm> that tries to choose good plans.
You can use the <xref linkend="sql-explain"> command
You can use the <xref linkend="sql-explain"/> command
to see what query plan the planner creates for any query.
Plan-reading is an art that requires some experience to master,
but this section attempts to cover the basics.
@ -132,9 +132,9 @@ EXPLAIN SELECT * FROM tenk1;
<para>
The costs are measured in arbitrary units determined by the planner's
cost parameters (see <xref linkend="runtime-config-query-constants">).
cost parameters (see <xref linkend="runtime-config-query-constants"/>).
Traditional practice is to measure the costs in units of disk page
fetches; that is, <xref linkend="guc-seq-page-cost"> is conventionally
fetches; that is, <xref linkend="guc-seq-page-cost"/> is conventionally
set to <literal>1.0</literal> and the other cost parameters are set relative
to that. The examples in this section are run with the default cost
parameters.
@ -182,8 +182,8 @@ SELECT relpages, reltuples FROM pg_class WHERE relname = 'tenk1';
you will find that <classname>tenk1</classname> has 358 disk
pages and 10000 rows. The estimated cost is computed as (disk pages read *
<xref linkend="guc-seq-page-cost">) + (rows scanned *
<xref linkend="guc-cpu-tuple-cost">). By default,
<xref linkend="guc-seq-page-cost"/>) + (rows scanned *
<xref linkend="guc-cpu-tuple-cost"/>). By default,
<varname>seq_page_cost</varname> is 1.0 and <varname>cpu_tuple_cost</varname> is 0.01,
so the estimated cost is (358 * 1.0) + (10000 * 0.01) = 458.
</para>
@ -209,7 +209,7 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 &lt; 7000;
<literal>WHERE</literal> clause.
However, the scan will still have to visit all 10000 rows, so the cost
hasn't decreased; in fact it has gone up a bit (by 10000 * <xref
linkend="guc-cpu-operator-cost">, to be exact) to reflect the extra CPU
linkend="guc-cpu-operator-cost"/>, to be exact) to reflect the extra CPU
time spent checking the <literal>WHERE</literal> condition.
</para>
@ -508,9 +508,9 @@ WHERE t1.unique1 &lt; 100 AND t1.unique2 = t2.unique2;
<para>
One way to look at variant plans is to force the planner to disregard
whatever strategy it thought was the cheapest, using the enable/disable
flags described in <xref linkend="runtime-config-query-enable">.
flags described in <xref linkend="runtime-config-query-enable"/>.
(This is a crude tool, but useful. See
also <xref linkend="explicit-joins">.)
also <xref linkend="explicit-joins"/>.)
For example, if we're unconvinced that sequential-scan-and-sort is the best way to
deal with table <literal>onek</literal> in the previous example, we could try
@ -828,7 +828,7 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101;
Second, the measurement overhead added by <command>EXPLAIN
ANALYZE</command> can be significant, especially on machines with slow
<function>gettimeofday()</function> operating-system calls. You can use the
<xref linkend="pgtesttiming"> tool to measure the overhead of timing
<xref linkend="pgtesttiming"/> tool to measure the overhead of timing
on your system.
</para>
@ -1032,7 +1032,7 @@ WHERE tablename = 'road';
arrays for each column, can be set on a
column-by-column basis using the <command>ALTER TABLE SET STATISTICS</command>
command, or globally by setting the
<xref linkend="guc-default-statistics-target"> configuration variable.
<xref linkend="guc-default-statistics-target"/> configuration variable.
The default limit is presently 100 entries. Raising the limit
might allow more accurate planner estimates to be made, particularly for
columns with irregular data distributions, at the price of consuming
@ -1043,7 +1043,7 @@ WHERE tablename = 'road';
<para>
Further details about the planner's use of statistics can be found in
<xref linkend="planner-stats-details">.
<xref linkend="planner-stats-details"/>.
</para>
</sect2>
@ -1087,7 +1087,7 @@ WHERE tablename = 'road';
<para>
Statistics objects are created using
<xref linkend="sql-createstatistics">, which see for more details.
<xref linkend="sql-createstatistics"/>, which see for more details.
Creation of such an object merely creates a catalog entry expressing
interest in the statistics. Actual data collection is performed
by <command>ANALYZE</command> (either a manual command, or background
@ -1323,7 +1323,7 @@ SELECT * FROM a, b, c WHERE a.id = b.id AND b.ref = c.id;
<productname>PostgreSQL</productname> planner will switch from exhaustive
search to a <firstterm>genetic</firstterm> probabilistic search
through a limited number of possibilities. (The switch-over threshold is
set by the <xref linkend="guc-geqo-threshold"> run-time
set by the <xref linkend="guc-geqo-threshold"/> run-time
parameter.)
The genetic search takes less time, but it won't
necessarily find the best possible plan.
@ -1379,7 +1379,7 @@ SELECT * FROM a JOIN (b JOIN c ON (b.ref = c.id)) ON (a.id = b.id);
<para>
To force the planner to follow the join order laid out by explicit
<literal>JOIN</literal>s,
set the <xref linkend="guc-join-collapse-limit"> run-time parameter to 1.
set the <xref linkend="guc-join-collapse-limit"/> run-time parameter to 1.
(Other possible values are discussed below.)
</para>
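For instance, using the explicit-join query shown in this hunk's header (a sketch; the setting only affects the current session):

SET join_collapse_limit = 1;
EXPLAIN SELECT * FROM a JOIN (b JOIN c ON (b.ref = c.id)) ON (a.id = b.id);
-- with the limit at 1, the planner keeps exactly the join order written in the query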
@ -1436,8 +1436,8 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
</para>
<para>
<xref linkend="guc-from-collapse-limit"> and <xref
linkend="guc-join-collapse-limit">
<xref linkend="guc-from-collapse-limit"/> and <xref
linkend="guc-join-collapse-limit"/>
are similarly named because they do almost the same thing: one controls
when the planner will <quote>flatten out</quote> subqueries, and the
other controls when it will flatten out explicit joins. Typically
@ -1488,7 +1488,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<title>Use <command>COPY</command></title>
<para>
Use <xref linkend="sql-copy"> to load
Use <xref linkend="sql-copy"/> to load
all the rows in one command, instead of using a series of
<command>INSERT</command> commands. The <command>COPY</command>
command is optimized for loading large numbers of rows; it is less
@ -1500,7 +1500,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<para>
If you cannot use <command>COPY</command>, it might help to use <xref
linkend="sql-prepare"> to create a
linkend="sql-prepare"/> to create a
prepared <command>INSERT</command> statement, and then use
<command>EXECUTE</command> as many times as required. This avoids
some of the overhead of repeatedly parsing and planning
@ -1523,7 +1523,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
needs to be written, because in case of an error, the files
containing the newly loaded data will be removed anyway.
However, this consideration only applies when
<xref linkend="guc-wal-level"> is <literal>minimal</literal> as all commands
<xref linkend="guc-wal-level"/> is <literal>minimal</literal> as all commands
must write WAL otherwise.
</para>
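A sketch of the two loading styles discussed in the preceding hunks (table name, column list, and file path are invented for illustration):

-- preferred: load everything with a single COPY
COPY items (id, name) FROM '/tmp/items.csv' WITH (FORMAT csv);

-- fallback when COPY cannot be used: prepare the INSERT once, then EXECUTE it repeatedly
PREPARE ins (integer, text) AS INSERT INTO items (id, name) VALUES ($1, $2);
EXECUTE ins(1, 'first row');
EXECUTE ins(2, 'second row');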
@ -1581,7 +1581,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<title>Increase <varname>maintenance_work_mem</varname></title>
<para>
Temporarily increasing the <xref linkend="guc-maintenance-work-mem">
Temporarily increasing the <xref linkend="guc-maintenance-work-mem"/>
configuration variable when loading large amounts of data can
lead to improved performance. This will help to speed up <command>CREATE
INDEX</command> commands and <command>ALTER TABLE ADD FOREIGN KEY</command> commands.
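For example (a sketch; the memory figure, table, and constraint are arbitrary):

SET maintenance_work_mem = '1GB';
CREATE INDEX items_name_idx ON items (name);
ALTER TABLE items ADD FOREIGN KEY (owner_id) REFERENCES owners (id);
RESET maintenance_work_mem;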
@ -1594,7 +1594,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<title>Increase <varname>max_wal_size</varname></title>
<para>
Temporarily increasing the <xref linkend="guc-max-wal-size">
Temporarily increasing the <xref linkend="guc-max-wal-size"/>
configuration variable can also
make large data loads faster. This is because loading a large
amount of data into <productname>PostgreSQL</productname> will
@ -1617,9 +1617,9 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
new base backup after the load has completed than to process a large
amount of incremental WAL data. To prevent incremental WAL logging
while loading, disable archiving and streaming replication, by setting
<xref linkend="guc-wal-level"> to <literal>minimal</literal>,
<xref linkend="guc-archive-mode"> to <literal>off</literal>, and
<xref linkend="guc-max-wal-senders"> to zero.
<xref linkend="guc-wal-level"/> to <literal>minimal</literal>,
<xref linkend="guc-archive-mode"/> to <literal>off</literal>, and
<xref linkend="guc-max-wal-senders"/> to zero.
But note that changing these settings requires a server restart.
</para>
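Those three changes could be made like this (a sketch using ALTER SYSTEM, which writes them to postgresql.auto.conf; as noted above, they only take effect after a server restart):

ALTER SYSTEM SET wal_level = 'minimal';
ALTER SYSTEM SET archive_mode = 'off';
ALTER SYSTEM SET max_wal_senders = 0;
-- restart the server afterwards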
@ -1668,7 +1668,7 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<para>
Whenever you have significantly altered the distribution of data
within a table, running <xref linkend="sql-analyze"> is strongly recommended. This
within a table, running <xref linkend="sql-analyze"/> is strongly recommended. This
includes bulk loading large amounts of data into the table. Running
<command>ANALYZE</command> (or <command>VACUUM ANALYZE</command>)
ensures that the planner has up-to-date statistics about the
@ -1677,8 +1677,8 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
performance on any tables with inaccurate or nonexistent
statistics. Note that if the autovacuum daemon is enabled, it might
run <command>ANALYZE</command> automatically; see
<xref linkend="vacuum-for-statistics">
and <xref linkend="autovacuum"> for more information.
<xref linkend="vacuum-for-statistics"/>
and <xref linkend="autovacuum"/> for more information.
</para>
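For instance (table name invented):

VACUUM ANALYZE items;   -- or plain "ANALYZE items;" to refresh statistics only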
</sect2>
@ -1779,8 +1779,8 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<varname>maintenance_work_mem</varname>; rather, you'd do that while
manually recreating indexes and foreign keys afterwards.
And don't forget to <command>ANALYZE</command> when you're done; see
<xref linkend="vacuum-for-statistics">
and <xref linkend="autovacuum"> for more information.
<xref linkend="vacuum-for-statistics"/>
and <xref linkend="autovacuum"/> for more information.
</para>
</sect2>
</sect1>
@ -1816,14 +1816,14 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<listitem>
<para>
Turn off <xref linkend="guc-fsync">; there is no need to flush
Turn off <xref linkend="guc-fsync"/>; there is no need to flush
data to disk.
</para>
</listitem>
<listitem>
<para>
Turn off <xref linkend="guc-synchronous-commit">; there might be no
Turn off <xref linkend="guc-synchronous-commit"/>; there might be no
need to force <acronym>WAL</acronym> writes to disk on every
commit. This setting does risk transaction loss (though not data
corruption) in case of a crash of the <emphasis>database</emphasis>.
@ -1832,15 +1832,15 @@ SELECT * FROM x, y, a, b, c WHERE something AND somethingelse;
<listitem>
<para>
Turn off <xref linkend="guc-full-page-writes">; there is no need
Turn off <xref linkend="guc-full-page-writes"/>; there is no need
to guard against partial page writes.
</para>
</listitem>
<listitem>
<para>
Increase <xref linkend="guc-max-wal-size"> and <xref
linkend="guc-checkpoint-timeout">; this reduces the frequency
Increase <xref linkend="guc-max-wal-size"/> and <xref
linkend="guc-checkpoint-timeout"/>; this reduces the frequency
of checkpoints, but increases the storage requirements of
<filename>/pg_wal</filename>.
</para>
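A sketch of those non-durable settings as a group (values are arbitrary; suitable only for data you can afford to lose, as the list above stresses):

ALTER SYSTEM SET fsync = off;
ALTER SYSTEM SET synchronous_commit = off;
ALTER SYSTEM SET full_page_writes = off;
ALTER SYSTEM SET max_wal_size = '10GB';
ALTER SYSTEM SET checkpoint_timeout = '30min';
SELECT pg_reload_conf();   -- these particular parameters can be picked up without a full restart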

View File

@ -33,7 +33,7 @@
<title>The <structname>pg_buffercache</structname> View</title>
<para>
The definitions of the columns exposed by the view are shown in <xref linkend="pgbuffercache-columns">.
The definitions of the columns exposed by the view are shown in <xref linkend="pgbuffercache-columns"/>.
</para>
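A usage sketch (the join against pg_class follows the usual pattern for this view; the output naturally varies per installation):

CREATE EXTENSION pg_buffercache;
SELECT c.relname, count(*) AS buffers
FROM pg_buffercache b
JOIN pg_class c ON b.relfilenode = pg_relation_filenode(c.oid)
               AND b.reldatabase IN (0, (SELECT oid FROM pg_database WHERE datname = current_database()))
GROUP BY c.relname
ORDER BY buffers DESC
LIMIT 10;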
<table id="pgbuffercache-columns">

View File

@ -40,7 +40,7 @@ digest(data bytea, type text) returns bytea
<literal>sha384</literal> and <literal>sha512</literal>.
If <filename>pgcrypto</filename> was built with
OpenSSL, more algorithms are available, as detailed in
<xref linkend="pgcrypto-with-without-openssl">.
<xref linkend="pgcrypto-with-without-openssl"/>.
</para>
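For example (a sketch; sha256 is one of the always-available algorithms listed above):

SELECT digest('hello, world', 'sha256');                  -- raw bytea hash
SELECT encode(digest('hello, world', 'sha256'), 'hex');   -- hex-encoded form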
<para>
@ -129,7 +129,7 @@ hmac(data bytea, key text, type text) returns bytea
</orderedlist>
<para>
<xref linkend="pgcrypto-crypt-algorithms"> lists the algorithms
<xref linkend="pgcrypto-crypt-algorithms"/> lists the algorithms
supported by the <function>crypt()</function> function.
</para>
@ -247,7 +247,7 @@ gen_salt(type text [, iter_count integer ]) returns text
&mdash; which is somewhat impractical. If the <parameter>iter_count</parameter>
parameter is omitted, the default iteration count is used.
Allowed values for <parameter>iter_count</parameter> depend on the algorithm and
are shown in <xref linkend="pgcrypto-icfc-table">.
are shown in <xref linkend="pgcrypto-icfc-table"/>.
</para>
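A usage sketch (the bf algorithm and iteration count of 8 are illustrative; the users table and its columns are invented):

-- store a new password hash
UPDATE users SET pswhash = crypt('new password', gen_salt('bf', 8)) WHERE usr = 'alice';

-- verify a password attempt: re-crypt it using the stored hash as the salt
SELECT (pswhash = crypt('entered password', pswhash)) AS password_ok
FROM users WHERE usr = 'alice';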
<table id="pgcrypto-icfc-table">
@ -292,7 +292,7 @@ gen_salt(type text [, iter_count integer ]) returns text
</para>
<para>
<xref linkend="pgcrypto-hash-speed-table"> gives an overview of the relative slowness
<xref linkend="pgcrypto-hash-speed-table"/> gives an overview of the relative slowness
of different hashing algorithms.
The table shows how much time it would take to try all
combinations of characters in an 8-character password, assuming

View File

@ -13,7 +13,7 @@
or the <productname>PostgreSQL</productname> buffer cache. Prewarming
can be performed manually using the <filename>pg_prewarm</filename> function,
or can be performed automatically by including <literal>pg_prewarm</literal> in
<xref linkend="guc-shared-preload-libraries">. In the latter case, the
<xref linkend="guc-shared-preload-libraries"/>. In the latter case, the
system will run a background worker which periodically records the contents
of shared buffers in a file called <filename>autoprewarm.blocks</filename> and
will, using 2 background workers, reload those same blocks after a restart.
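A manual invocation might look like this (table name invented; the single-argument form loads the table's main fork into shared buffers):

CREATE EXTENSION pg_prewarm;
SELECT pg_prewarm('items');   -- returns the number of blocks prewarmed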

View File

@ -33,7 +33,7 @@ pgrowlocks(text) returns setof record
<para>
The parameter is the name of a table. The result is a set of records,
with one row for each locked row within the table. The output columns
are shown in <xref linkend="pgrowlocks-columns">.
are shown in <xref linkend="pgrowlocks-columns"/>.
</para>
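For example (table name invented):

CREATE EXTENSION pgrowlocks;
SELECT * FROM pgrowlocks('items');   -- one result row per currently locked row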
<table id="pgrowlocks-columns">

View File

@ -41,7 +41,7 @@
<varname>restore_command</varname>, which is needed to turn a standard
archive recovery into a warm standby operation. Other
configuration is required as well, all of which is described in the main
server manual (see <xref linkend="warm-standby">).
server manual (see <xref linkend="warm-standby"/>).
</para>
<para>
@ -180,7 +180,7 @@ restore_command = 'pg_standby <replaceable>archiveDir</replaceable> %f %p %r'
Set the number of seconds (up to 60, default 5) to sleep between
tests to see if the WAL file to be restored is available in
the archive yet. The default setting is not necessarily
recommended; consult <xref linkend="warm-standby"> for discussion.
recommended; consult <xref linkend="warm-standby"/> for discussion.
</para>
</listitem>
</varlistentry>
@ -216,7 +216,7 @@ restore_command = 'pg_standby <replaceable>archiveDir</replaceable> %f %p %r'
after which a fast failover will be performed.
A setting of zero (the default) means wait forever.
The default setting is not necessarily recommended;
consult <xref linkend="warm-standby"> for discussion.
consult <xref linkend="warm-standby"/> for discussion.
</para>
</listitem>
</varlistentry>
@ -388,7 +388,7 @@ recovery_end_command = 'del C:\pgsql.trigger.5442'
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="pgarchivecleanup"></member>
<member><xref linkend="pgarchivecleanup"/></member>
</simplelist>
</refsect1>
</refentry>

View File

@ -14,7 +14,7 @@
<para>
The module must be loaded by adding <literal>pg_stat_statements</literal> to
<xref linkend="guc-shared-preload-libraries"> in
<xref linkend="guc-shared-preload-libraries"/> in
<filename>postgresql.conf</filename>, because it requires additional shared memory.
This means that a server restart is needed to add or remove the module.
</para>
@ -38,7 +38,7 @@
contains one row for each distinct database ID, user ID and query
ID (up to the maximum number of distinct statements that the module
can track). The columns of the view are shown in
<xref linkend="pgstatstatements-columns">.
<xref linkend="pgstatstatements-columns"/>.
</para>
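A sketch of enabling and querying the module (column names as of this documentation's vintage):

-- postgresql.conf must contain:  shared_preload_libraries = 'pg_stat_statements'
CREATE EXTENSION pg_stat_statements;
SELECT query, calls, total_time, rows
FROM pg_stat_statements
ORDER BY total_time DESC
LIMIT 5;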
<table id="pgstatstatements-columns">
@ -207,7 +207,7 @@
<entry></entry>
<entry>
Total time the statement spent reading blocks, in milliseconds
(if <xref linkend="guc-track-io-timing"> is enabled, otherwise zero)
(if <xref linkend="guc-track-io-timing"/> is enabled, otherwise zero)
</entry>
</row>
@ -217,7 +217,7 @@
<entry></entry>
<entry>
Total time the statement spent writing blocks, in milliseconds
(if <xref linkend="guc-track-io-timing"> is enabled, otherwise zero)
(if <xref linkend="guc-track-io-timing"/> is enabled, otherwise zero)
</entry>
</row>

View File

@ -55,7 +55,7 @@ dead_tuple_percent | 0.69
free_space | 8932
free_percent | 1.95
</programlisting>
The output columns are described in <xref linkend="pgstattuple-columns">.
The output columns are described in <xref linkend="pgstattuple-columns"/>.
</para>
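For example (table name invented; pgstattuple_approx is the cheaper variant whose output columns appear further down in this file):

CREATE EXTENSION pgstattuple;
SELECT * FROM pgstattuple('items');
SELECT * FROM pgstattuple_approx('items'::regclass);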
<table id="pgstattuple-columns">
@ -509,7 +509,7 @@ dead_tuple_percent | 0
approx_free_space | 11996
approx_free_percent | 2.09
</programlisting>
The output columns are described in <xref linkend="pgstatapprox-columns">.
The output columns are described in <xref linkend="pgstatapprox-columns"/>.
</para>
<para>

View File

@ -58,8 +58,8 @@
<para>
The functions provided by the <filename>pg_trgm</filename> module
are shown in <xref linkend="pgtrgm-func-table">, the operators
in <xref linkend="pgtrgm-op-table">.
are shown in <xref linkend="pgtrgm-func-table"/>, the operators
in <xref linkend="pgtrgm-op-table"/>.
</para>
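A quick illustration of the trigram support (the strings are arbitrary):

CREATE EXTENSION pg_trgm;
SELECT similarity('word', 'two words');        -- similarity score between 0 and 1
SELECT 'word'::text % 'two words'::text;       -- true when the score exceeds the current threshold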
<table id="pgtrgm-func-table">

View File

@ -5,7 +5,7 @@
<para>
This chapter builds on the material covered in <xref
linkend="using-explain"> and <xref linkend="planner-stats"> to show some
linkend="using-explain"/> and <xref linkend="planner-stats"/> to show some
additional details about how the planner uses the
system statistics to estimate the number of rows each part of a query might
return. This is a significant part of the planning process,
@ -49,7 +49,7 @@ EXPLAIN SELECT * FROM tenk1;
</programlisting>
How the planner determines the cardinality of <structname>tenk1</structname>
is covered in <xref linkend="planner-stats">, but is repeated here for
is covered in <xref linkend="planner-stats"/>, but is repeated here for
completeness. The number of pages and rows is looked up in
<structname>pg_class</structname>:
@ -468,7 +468,7 @@ INSERT INTO t SELECT i % 100, i % 100 FROM generate_series(1, 10000) s(i);
ANALYZE t;
</programlisting>
As explained in <xref linkend="planner-stats">, the planner can determine
As explained in <xref linkend="planner-stats"/>, the planner can determine
cardinality of <structname>t</structname> using the number of pages and
rows obtained from <structname>pg_class</structname>:

View File

@ -29,7 +29,7 @@
special pseudo-type identifies the function as a call handler and
prevents it from being called directly in SQL commands.
For more details on C language calling conventions and dynamic loading,
see <xref linkend="xfunc-c">.
see <xref linkend="xfunc-c"/>.
</para>
<para>
@ -144,7 +144,7 @@ plsample_call_handler(PG_FUNCTION_ARGS)
<para>
After having compiled the handler function into a loadable module
(see <xref linkend="dfunc">), the following commands then
(see <xref linkend="dfunc"/>), the following commands then
register the sample procedural language:
<programlisting>
CREATE FUNCTION plsample_call_handler() RETURNS language_handler
@ -162,9 +162,9 @@ CREATE LANGUAGE plsample
are a <firstterm>validator</firstterm> and an
<firstterm>inline handler</firstterm>. A validator can be provided
to allow language-specific checking to be done during
<xref linkend="sql-createfunction">.
<xref linkend="sql-createfunction"/>.
An inline handler can be provided to allow the language to support
anonymous code blocks executed via the <xref linkend="sql-do"> command.
anonymous code blocks executed via the <xref linkend="sql-do"/> command.
</para>
<para>
@ -191,7 +191,7 @@ CREATE LANGUAGE plsample
<para>
Validator functions should typically honor the <xref
linkend="guc-check-function-bodies"> parameter: if it is turned off then
linkend="guc-check-function-bodies"/> parameter: if it is turned off then
any expensive or context-sensitive checking should be skipped. If the
language provides for code execution at compilation time, the validator
must suppress checks that would induce such execution. In particular,
@ -230,7 +230,7 @@ CREATE LANGUAGE plsample
as well as the <command>CREATE LANGUAGE</command> command itself, into
an <firstterm>extension</firstterm> so that a simple <command>CREATE EXTENSION</command>
command is sufficient to install the language. See
<xref linkend="extend-extensions"> for information about writing
<xref linkend="extend-extensions"/> for information about writing
extensions.
</para>
@ -238,7 +238,7 @@ CREATE LANGUAGE plsample
The procedural languages included in the standard distribution
are good references when trying to write your own language handler.
Look into the <filename>src/pl</filename> subdirectory of the source tree.
The <xref linkend="sql-createlanguage">
The <xref linkend="sql-createlanguage"/>
reference page also has some useful details.
</para>

View File

@ -41,7 +41,7 @@
<para>
Users of source packages must specially enable the build of
PL/Perl during the installation process. (Refer to <xref
linkend="installation"> for more information.) Users of
linkend="installation"/> for more information.) Users of
binary packages might find PL/Perl in a separate subpackage.
</para>
</note>
@ -51,7 +51,7 @@
<para>
To create a function in the PL/Perl language, use the standard
<xref linkend="sql-createfunction">
<xref linkend="sql-createfunction"/>
syntax:
<programlisting>
@ -69,7 +69,7 @@ $$ LANGUAGE plperl;
<para>
PL/Perl also supports anonymous code blocks called with the
<xref linkend="sql-do"> statement:
<xref linkend="sql-do"/> statement:
<programlisting>
DO $$
@ -99,11 +99,11 @@ $$ LANGUAGE plperl;
The syntax of the <command>CREATE FUNCTION</command> command requires
the function body to be written as a string constant. It is usually
most convenient to use dollar quoting (see <xref
linkend="sql-syntax-dollar-quoting">) for the string constant.
linkend="sql-syntax-dollar-quoting"/>) for the string constant.
If you choose to use escape string syntax <literal>E''</literal>,
you must double any single quote marks (<literal>'</literal>) and backslashes
(<literal>\</literal>) used in the body of the function
(see <xref linkend="sql-syntax-strings">).
(see <xref linkend="sql-syntax-strings"/>).
</para>
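Putting those pieces together, a complete dollar-quoted PL/Perl function might look like this (the function name and body are illustrative):

CREATE FUNCTION perl_max (integer, integer) RETURNS integer AS $$
    my ($x, $y) = @_;
    return $x > $y ? $x : $y;
$$ LANGUAGE plperl;

SELECT perl_max(1, 2);   -- returns 2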
<para>
@ -686,9 +686,9 @@ SELECT release_hosts_query();
priority levels.
Whether messages of a particular priority are reported to the client,
written to the server log, or both is controlled by the
<xref linkend="guc-log-min-messages"> and
<xref linkend="guc-client-min-messages"> configuration
variables. See <xref linkend="runtime-config"> for more
<xref linkend="guc-log-min-messages"/> and
<xref linkend="guc-client-min-messages"/> configuration
variables. See <xref linkend="runtime-config"/> for more
information.
</para>
</listitem>
@ -792,7 +792,7 @@ SELECT release_hosts_query();
<listitem>
<para>
Returns the contents of the referenced array as a string in array literal format
(see <xref linkend="arrays-input">).
(see <xref linkend="arrays-input"/>).
Returns the argument value unaltered if it's not a reference to an array.
The delimiter used between elements of the array literal defaults to "<literal>, </literal>"
if a delimiter is not specified or is undef.
@ -828,7 +828,7 @@ SELECT release_hosts_query();
<listitem>
<para>
Returns the contents of the referenced array as a string in array constructor format
(see <xref linkend="sql-syntax-array-constructors">).
(see <xref linkend="sql-syntax-array-constructors"/>).
Individual values are quoted using <function>quote_nullable</function>.
Returns the argument value, quoted using <function>quote_nullable</function>,
if it's not a reference to an array.
@ -1336,7 +1336,7 @@ DO 'elog(WARNING, join ", ", sort keys %INC)' LANGUAGE plperl;
</para>
<para>
Initialization will happen in the postmaster if the <literal>plperl</literal> library is
included in <xref linkend="guc-shared-preload-libraries">, in which
included in <xref linkend="guc-shared-preload-libraries"/>, in which
case extra consideration should be given to the risk of destabilizing
the postmaster. The principal reason for making use of this feature
is that Perl modules loaded by <literal>plperl.on_init</literal> need be

View File

@ -125,14 +125,14 @@
It is also possible to declare a <application>PL/pgSQL</application>
function as returning <type>record</type>, which means that the result
is a row type whose columns are determined by specification in the
calling query, as discussed in <xref linkend="queries-tablefunctions">.
calling query, as discussed in <xref linkend="queries-tablefunctions"/>.
</para>
<para>
<application>PL/pgSQL</application> functions can be declared to accept a variable
number of arguments by using the <literal>VARIADIC</literal> marker. This
works exactly the same way as for SQL functions, as discussed in
<xref linkend="xfunc-sql-variadic-functions">.
<xref linkend="xfunc-sql-variadic-functions"/>.
</para>
<para>
@ -141,8 +141,8 @@
<type>anyelement</type>, <type>anyarray</type>, <type>anynonarray</type>,
<type>anyenum</type>, and <type>anyrange</type>. The actual
data types handled by a polymorphic function can vary from call to
call, as discussed in <xref linkend="extend-types-polymorphic">.
An example is shown in <xref linkend="plpgsql-declaration-parameters">.
call, as discussed in <xref linkend="extend-types-polymorphic"/>.
An example is shown in <xref linkend="plpgsql-declaration-parameters"/>.
</para>
<para>
@ -170,8 +170,8 @@
<para>
Specific examples appear in
<xref linkend="plpgsql-declaration-parameters"> and
<xref linkend="plpgsql-statements-returning">.
<xref linkend="plpgsql-declaration-parameters"/> and
<xref linkend="plpgsql-statements-returning"/>.
</para>
</sect2>
</sect1>
@ -181,7 +181,7 @@
<para>
Functions written in <application>PL/pgSQL</application> are defined
to the server by executing <xref linkend="sql-createfunction"> commands.
to the server by executing <xref linkend="sql-createfunction"/> commands.
Such a command would normally look like, say,
<programlisting>
CREATE FUNCTION somefunc(integer, text) RETURNS integer
@ -190,7 +190,7 @@ LANGUAGE plpgsql;
</programlisting>
The function body is simply a string literal so far as <command>CREATE
FUNCTION</command> is concerned. It is often helpful to use dollar quoting
(see <xref linkend="sql-syntax-dollar-quoting">) to write the function
(see <xref linkend="sql-syntax-dollar-quoting"/>) to write the function
body, rather than the normal single quote syntax. Without dollar quoting,
any single quotes or backslashes in the function body must be escaped by
doubling them. Almost all the examples in this chapter use dollar-quoted
@ -289,7 +289,7 @@ $$ LANGUAGE plpgsql;
of any <application>PL/pgSQL</application> function. This block provides the
declarations of the function's parameters (if any), as well as some
special variables such as <literal>FOUND</literal> (see
<xref linkend="plpgsql-statements-diagnostics">). The outer block is
<xref linkend="plpgsql-statements-diagnostics"/>). The outer block is
labeled with the function's name, meaning that parameters and special
variables can be qualified with the function's name.
</para>
@ -308,7 +308,7 @@ $$ LANGUAGE plpgsql;
However, a block containing an <literal>EXCEPTION</literal> clause effectively
forms a subtransaction that can be rolled back without affecting the
outer transaction. For more about that see <xref
linkend="plpgsql-error-trapping">.
linkend="plpgsql-error-trapping"/>.
</para>
</sect1>
@ -356,7 +356,7 @@ arow RECORD;
assigned to after initialization, so that its value will remain constant
for the duration of the block.
The <literal>COLLATE</literal> option specifies a collation to use for the
variable (see <xref linkend="plpgsql-declaration-collation">).
variable (see <xref linkend="plpgsql-declaration-collation"/>).
If <literal>NOT NULL</literal>
is specified, an assignment of a null value results in a run-time
error. All variables declared as <literal>NOT NULL</literal>
@ -491,7 +491,7 @@ END;
$$ LANGUAGE plpgsql;
</programlisting>
As discussed in <xref linkend="xfunc-output-parameters">, this
As discussed in <xref linkend="xfunc-output-parameters"/>, this
effectively creates an anonymous record type for the function's
results. If a <literal>RETURNS</literal> clause is given, it must say
<literal>RETURNS record</literal>.
@ -523,9 +523,9 @@ $$ LANGUAGE plpgsql;
or <type>anyrange</type>), a special parameter <literal>$0</literal>
is created. Its data type is the actual return type of the function,
as deduced from the actual input types (see <xref
linkend="extend-types-polymorphic">).
linkend="extend-types-polymorphic"/>).
This allows the function to access its actual return type
as shown in <xref linkend="plpgsql-declaration-type">.
as shown in <xref linkend="plpgsql-declaration-type"/>.
<literal>$0</literal> is initialized to null and can be modified by
the function, so it can be used to hold the return value if desired,
though that is not required. <literal>$0</literal> can also be
@ -740,7 +740,7 @@ SELECT merge_fields(t.*) FROM table1 t WHERE ... ;
When a <application>PL/pgSQL</application> function has one or more
parameters of collatable data types, a collation is identified for each
function call depending on the collations assigned to the actual
arguments, as described in <xref linkend="collation">. If a collation is
arguments, as described in <xref linkend="collation"/>. If a collation is
successfully identified (i.e., there are no conflicts of implicit
collations among the arguments) then all the collatable parameters are
treated as having that collation implicitly. This will affect the
@ -841,7 +841,7 @@ SELECT <replaceable>expression</replaceable>
to the main SQL engine. While forming the <command>SELECT</command> command,
any occurrences of <application>PL/pgSQL</application> variable names
are replaced by parameters, as discussed in detail in
<xref linkend="plpgsql-var-subst">.
<xref linkend="plpgsql-var-subst"/>.
This allows the query plan for the <command>SELECT</command> to
be prepared just once and then reused for subsequent
evaluations with different values of the variables. Thus, what
@ -861,7 +861,7 @@ PREPARE <replaceable>statement_name</replaceable>(integer, integer) AS SELECT $1
parameter values. Normally these details are
not important to a <application>PL/pgSQL</application> user, but
they are useful to know when trying to diagnose a problem.
More information appears in <xref linkend="plpgsql-plan-caching">.
More information appears in <xref linkend="plpgsql-plan-caching"/>.
</para>
</sect1>
@ -874,8 +874,8 @@ PREPARE <replaceable>statement_name</replaceable>(integer, integer) AS SELECT $1
<application>PL/pgSQL</application>.
Anything not recognized as one of these statement types is presumed
to be an SQL command and is sent to the main database engine to execute,
as described in <xref linkend="plpgsql-statements-sql-noresult">
and <xref linkend="plpgsql-statements-sql-onerow">.
as described in <xref linkend="plpgsql-statements-sql-noresult"/>
and <xref linkend="plpgsql-statements-sql-onerow"/>.
</para>
<sect2 id="plpgsql-statements-assignment">
@ -900,7 +900,7 @@ PREPARE <replaceable>statement_name</replaceable>(integer, integer) AS SELECT $1
<para>
If the expression's result data type doesn't match the variable's
data type, the value will be coerced as though by an assignment cast
(see <xref linkend="typeconv-query">). If no assignment cast is known
(see <xref linkend="typeconv-query"/>). If no assignment cast is known
for the pair of data types involved, the <application>PL/pgSQL</application>
interpreter will attempt to convert the result value textually, that is
by applying the result type's output function followed by the variable
@ -933,14 +933,14 @@ my_record.user_id := 20;
in the command text is treated as a parameter, and then the
current value of the variable is provided as the parameter value
at run time. This is exactly like the processing described earlier
for expressions; for details see <xref linkend="plpgsql-var-subst">.
for expressions; for details see <xref linkend="plpgsql-var-subst"/>.
</para>
<para>
When executing a SQL command in this way,
<application>PL/pgSQL</application> may cache and re-use the execution
plan for the command, as discussed in
<xref linkend="plpgsql-plan-caching">.
<xref linkend="plpgsql-plan-caching"/>.
</para>
<para>
@ -966,7 +966,7 @@ PERFORM <replaceable>query</replaceable>;
and the plan is cached in the same way. Also, the special variable
<literal>FOUND</literal> is set to true if the query produced at
least one row, or false if it produced no rows (see
<xref linkend="plpgsql-statements-diagnostics">).
<xref linkend="plpgsql-statements-diagnostics"/>).
</para>
<note>
@ -1067,7 +1067,7 @@ DELETE ... RETURNING <replaceable>expressions</replaceable> INTO <optional>STRIC
well-defined unless you've used <literal>ORDER BY</literal>.) Any result rows
after the first row are discarded.
You can check the special <literal>FOUND</literal> variable (see
<xref linkend="plpgsql-statements-diagnostics">) to
<xref linkend="plpgsql-statements-diagnostics"/>) to
determine whether a row was returned:
<programlisting>
@ -1147,7 +1147,7 @@ CONTEXT: PL/pgSQL function get_userid(text) line 6 at SQL statement
<para>
To handle cases where you need to process multiple result rows
from a SQL query, see <xref linkend="plpgsql-records-iterating">.
from a SQL query, see <xref linkend="plpgsql-records-iterating"/>.
</para>
</sect2>
@ -1161,7 +1161,7 @@ CONTEXT: PL/pgSQL function get_userid(text) line 6 at SQL statement
that will involve different tables or different data types each
time they are executed. <application>PL/pgSQL</application>'s
normal attempts to cache plans for commands (as discussed in
<xref linkend="plpgsql-plan-caching">) will not work in such
<xref linkend="plpgsql-plan-caching"/>) will not work in such
scenarios. To handle this sort of problem, the
<command>EXECUTE</command> statement is provided:
@ -1283,7 +1283,7 @@ EXECUTE format('SELECT count(*) FROM %I '
<para>
The <application>PL/pgSQL</application>
<command>EXECUTE</command> statement is not related to the
<xref linkend="sql-execute"> SQL
<xref linkend="sql-execute"/> SQL
statement supported by the
<productname>PostgreSQL</productname> server. The server's
<command>EXECUTE</command> statement cannot be used directly within
@ -1319,7 +1319,7 @@ EXECUTE format('SELECT count(*) FROM %I '
of single quotes. The recommended method for quoting fixed text in your
function body is dollar quoting. (If you have legacy code that does
not use dollar quoting, please refer to the
overview in <xref linkend="plpgsql-quote-tips">, which can save you
overview in <xref linkend="plpgsql-quote-tips"/>, which can save you
some effort when translating said code to a more reasonable scheme.)
</para>
@ -1347,7 +1347,7 @@ EXECUTE 'UPDATE tbl SET '
This example demonstrates the use of the
<function>quote_ident</function> and
<function>quote_literal</function> functions (see <xref
linkend="functions-string">). For safety, expressions containing column
linkend="functions-string"/>). For safety, expressions containing column
or table identifiers should be passed through
<function>quote_ident</function> before insertion in a dynamic query.
Expressions containing values that should be literal strings in the
@ -1394,7 +1394,7 @@ EXECUTE 'UPDATE tbl SET '
</programlisting>
(At present, <literal>IS NOT DISTINCT FROM</literal> is handled much less
efficiently than <literal>=</literal>, so don't do this unless you must.
See <xref linkend="functions-comparison"> for
See <xref linkend="functions-comparison"/> for
more information on nulls and <literal>IS DISTINCT</literal>.)
</para>
@ -1420,7 +1420,7 @@ EXECUTE 'UPDATE tbl SET '
<para>
Dynamic SQL statements can also be safely constructed using the
<function>format</function> function (see <xref
linkend="functions-string">). For example:
linkend="functions-string"/>). For example:
<programlisting>
EXECUTE format('UPDATE tbl SET %I = %L '
'WHERE key = %L', colname, newvalue, keyvalue);
@ -1442,7 +1442,7 @@ EXECUTE format('UPDATE tbl SET %I = $1 WHERE key = $2', colname)
<para>
A much larger example of a dynamic command and
<command>EXECUTE</command> can be seen in <xref
linkend="plpgsql-porting-ex2">, which builds and executes a
linkend="plpgsql-porting-ex2"/>, which builds and executes a
<command>CREATE FUNCTION</command> command to define a new function.
</para>
</sect2>
@ -1461,12 +1461,12 @@ GET <optional> CURRENT </optional> DIAGNOSTICS <replaceable>variable</replaceabl
This command allows retrieval of system status indicators.
<literal>CURRENT</literal> is a noise word (but see also <command>GET STACKED
DIAGNOSTICS</command> in <xref linkend="plpgsql-exception-diagnostics">).
DIAGNOSTICS</command> in <xref linkend="plpgsql-exception-diagnostics"/>).
Each <replaceable>item</replaceable> is a key word identifying a status
value to be assigned to the specified <replaceable>variable</replaceable>
(which should be of the right data type to receive it). The currently
available status items are shown
in <xref linkend="plpgsql-current-diagnostics-values">. Colon-equal
in <xref linkend="plpgsql-current-diagnostics-values"/>. Colon-equal
(<literal>:=</literal>) can be used instead of the SQL-standard <literal>=</literal>
token. An example:
<programlisting>
@ -1503,7 +1503,7 @@ GET DIAGNOSTICS integer_var = ROW_COUNT;
<entry><literal>PG_CONTEXT</literal></entry>
<entry><type>text</type></entry>
<entry>line(s) of text describing the current call stack
(see <xref linkend="plpgsql-call-stack">)</entry>
(see <xref linkend="plpgsql-call-stack"/>)</entry>
</row>
</tbody>
</tgroup>
@ -1856,7 +1856,7 @@ SELECT * FROM get_available_flightid(CURRENT_DATE);
allow users to define set-returning functions
that do not have this limitation. Currently, the point at
which data begins being written to disk is controlled by the
<xref linkend="guc-work-mem">
<xref linkend="guc-work-mem"/>
configuration variable. Administrators who have sufficient
memory to store larger result sets in memory should consider
increasing this parameter.
@ -2440,8 +2440,8 @@ $$ LANGUAGE plpgsql;
<para>
<application>PL/pgSQL</application> variables are substituted into the query text,
and the query plan is cached for possible re-use, as discussed in
detail in <xref linkend="plpgsql-var-subst"> and
<xref linkend="plpgsql-plan-caching">.
detail in <xref linkend="plpgsql-var-subst"/> and
<xref linkend="plpgsql-plan-caching"/>.
</para>
<para>
@ -2465,7 +2465,7 @@ END LOOP <optional> <replaceable>label</replaceable> </optional>;
<para>
Another way to specify the query whose results should be iterated
through is to declare it as a cursor. This is described in
<xref linkend="plpgsql-cursor-for-loop">.
<xref linkend="plpgsql-cursor-for-loop"/>.
</para>
</sect2>
@ -2605,7 +2605,7 @@ END;
<para>
The <replaceable>condition</replaceable> names can be any of
those shown in <xref linkend="errcodes-appendix">. A category
those shown in <xref linkend="errcodes-appendix"/>. A category
name matches any error within its category. The special
condition name <literal>OTHERS</literal> matches every error type except
<literal>QUERY_CANCELED</literal> and <literal>ASSERT_FAILURE</literal>.
@ -2729,7 +2729,7 @@ SELECT merge_db(1, 'dennis');
<para>
Within an exception handler, the special variable
<varname>SQLSTATE</varname> contains the error code that corresponds to
the exception that was raised (refer to <xref linkend="errcodes-table">
the exception that was raised (refer to <xref linkend="errcodes-table"/>
for a list of possible error codes). The special variable
<varname>SQLERRM</varname> contains the error message associated with the
exception. These variables are undefined outside exception handlers.
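A small block showing both variables inside a handler (a sketch; the division by zero is just a convenient way to raise an error):

DO $$
BEGIN
    PERFORM 1 / 0;
EXCEPTION
    WHEN OTHERS THEN
        RAISE NOTICE 'caught %: %', SQLSTATE, SQLERRM;   -- e.g. 22012: division by zero
END;
$$;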
@ -2748,7 +2748,7 @@ GET STACKED DIAGNOSTICS <replaceable>variable</replaceable> { = | := } <replacea
value to be assigned to the specified <replaceable>variable</replaceable>
(which should be of the right data type to receive it). The currently
available status items are shown
in <xref linkend="plpgsql-exception-diagnostics-values">.
in <xref linkend="plpgsql-exception-diagnostics-values"/>.
</para>
<table id="plpgsql-exception-diagnostics-values">
@ -2811,7 +2811,7 @@ GET STACKED DIAGNOSTICS <replaceable>variable</replaceable> { = | := } <replacea
<entry><literal>PG_EXCEPTION_CONTEXT</literal></entry>
<entry><type>text</type></entry>
<entry>line(s) of text describing the call stack at the time of the
exception (see <xref linkend="plpgsql-call-stack">)</entry>
exception (see <xref linkend="plpgsql-call-stack"/>)</entry>
</row>
</tbody>
</tgroup>
@ -2847,7 +2847,7 @@ END;
<para>
The <command>GET DIAGNOSTICS</command> command, previously described
in <xref linkend="plpgsql-statements-diagnostics">, retrieves information
in <xref linkend="plpgsql-statements-diagnostics"/>, retrieves information
about current execution state (whereas the <command>GET STACKED
DIAGNOSTICS</command> command discussed above reports information about
the execution state as of a previous error). Its <literal>PG_CONTEXT</literal>
@ -2978,7 +2978,7 @@ DECLARE
<para>
Bound cursor variables can also be used without explicitly opening the cursor,
via the <command>FOR</command> statement described in
<xref linkend="plpgsql-cursor-for-loop">.
<xref linkend="plpgsql-cursor-for-loop"/>.
</para>
</note>
@ -3031,7 +3031,7 @@ OPEN <replaceable>unbound_cursorvar</replaceable> <optional> <optional> NO </opt
<type>refcursor</type> variable). The query is specified as a string
expression, in the same way as in the <command>EXECUTE</command>
command. As usual, this gives flexibility so the query plan can vary
from one run to the next (see <xref linkend="plpgsql-plan-caching">),
from one run to the next (see <xref linkend="plpgsql-plan-caching"/>),
and it also means that variable substitution is not done on the
command string. As with <command>EXECUTE</command>, parameter values
can be inserted into the dynamic command via
@ -3082,7 +3082,7 @@ OPEN <replaceable>bound_cursorvar</replaceable> <optional> ( <optional> <replace
notation, all arguments are specified in order. In named notation,
each argument's name is specified using <literal>:=</literal> to
separate it from the argument expression. Similar to calling
functions, described in <xref linkend="sql-syntax-calling-funcs">, it
functions, described in <xref linkend="sql-syntax-calling-funcs"/>, it
is also allowed to mix positional and named notation.
</para>
@ -3160,7 +3160,7 @@ FETCH <optional> <replaceable>direction</replaceable> { FROM | IN } </optional>
<para>
The <replaceable>direction</replaceable> clause can be any of the
variants allowed in the SQL <xref linkend="sql-fetch">
variants allowed in the SQL <xref linkend="sql-fetch"/>
command except the ones that can fetch
more than one row; namely, it can be
<literal>NEXT</literal>,
@ -3212,7 +3212,7 @@ MOVE <optional> <replaceable>direction</replaceable> { FROM | IN } </optional> <
<para>
The <replaceable>direction</replaceable> clause can be any of the
variants allowed in the SQL <xref linkend="sql-fetch">
variants allowed in the SQL <xref linkend="sql-fetch"/>
command, namely
<literal>NEXT</literal>,
<literal>PRIOR</literal>,
@ -3255,7 +3255,7 @@ DELETE FROM <replaceable>table</replaceable> WHERE CURRENT OF <replaceable>curso
restrictions on what the cursor's query can be (in particular,
no grouping) and it's best to use <literal>FOR UPDATE</literal> in the
cursor. For more information see the
<xref linkend="sql-declare">
<xref linkend="sql-declare"/>
reference page.
</para>
@ -3422,7 +3422,7 @@ END LOOP <optional> <replaceable>label</replaceable> </optional>;
expressions must appear if and only if the cursor was declared to take
arguments. These values will be substituted in the query, in just
the same way as during an <command>OPEN</command> (see <xref
linkend="plpgsql-open-bound-cursor">).
linkend="plpgsql-open-bound-cursor"/>).
</para>
<para>
@ -3475,9 +3475,9 @@ RAISE ;
priority levels.
Whether messages of a particular priority are reported to the client,
written to the server log, or both is controlled by the
<xref linkend="guc-log-min-messages"> and
<xref linkend="guc-client-min-messages"> configuration
variables. See <xref linkend="runtime-config"> for more
<xref linkend="guc-log-min-messages"/> and
<xref linkend="guc-client-min-messages"/> configuration
variables. See <xref linkend="runtime-config"/> for more
information.
</para>
@ -3541,7 +3541,7 @@ RAISE NOTICE 'Calling cs_create_job(%)', v_job_id;
<term><literal>ERRCODE</literal></term>
<listitem>
<para>Specifies the error code (SQLSTATE) to report, either by condition
name, as shown in <xref linkend="errcodes-appendix">, or directly as a
name, as shown in <xref linkend="errcodes-appendix"/>, or directly as a
five-character SQLSTATE code.</para>
</listitem>
</varlistentry>
@ -3928,7 +3928,7 @@ ASSERT <replaceable class="parameter">condition</replaceable> <optional> , <repl
</para>
<para>
<xref linkend="plpgsql-trigger-example"> shows an example of a
<xref linkend="plpgsql-trigger-example"/> shows an example of a
trigger procedure in <application>PL/pgSQL</application>.
</para>
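A stripped-down version of such a trigger pair, modelled on the emp_stamp trigger named in the next hunk (a sketch; the last_updated column is invented):

CREATE FUNCTION emp_stamp() RETURNS trigger AS $$
BEGIN
    NEW.last_updated := now();   -- assumes the emp table has a last_updated column
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER emp_stamp BEFORE INSERT OR UPDATE ON emp
    FOR EACH ROW EXECUTE PROCEDURE emp_stamp();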
@ -3981,7 +3981,7 @@ CREATE TRIGGER emp_stamp BEFORE INSERT OR UPDATE ON emp
Another way to log changes to a table involves creating a new table that
holds a row for each insert, update, or delete that occurs. This approach
can be thought of as auditing changes to a table.
<xref linkend="plpgsql-trigger-audit-example"> shows an example of an
<xref linkend="plpgsql-trigger-audit-example"/> shows an example of an
audit trigger procedure in <application>PL/pgSQL</application>.
</para>
@ -4038,7 +4038,7 @@ AFTER INSERT OR UPDATE OR DELETE ON emp
approach still records the full audit trail of changes to the table,
but also presents a simplified view of the audit trail, showing just
the last modified timestamp derived from the audit trail for each entry.
<xref linkend="plpgsql-view-trigger-audit-example"> shows an example
<xref linkend="plpgsql-view-trigger-audit-example"/> shows an example
of an audit trigger on a view in <application>PL/pgSQL</application>.
</para>
@ -4118,7 +4118,7 @@ INSTEAD OF INSERT OR UPDATE OR DELETE ON emp_view
times.
This technique is commonly used in Data Warehousing, where the tables
of measured or observed data (called fact tables) might be extremely large.
<xref linkend="plpgsql-trigger-summary-example"> shows an example of a
<xref linkend="plpgsql-trigger-summary-example"/> shows an example of a
trigger procedure in <application>PL/pgSQL</application> that maintains
a summary table for a fact table in a data warehouse.
</para>
@ -4272,7 +4272,7 @@ SELECT * FROM sales_summary_bytime;
statement. The <command>CREATE TRIGGER</command> command assigns names to one
or both transition tables, and then the function can refer to those names
as though they were read-only temporary tables.
<xref linkend="plpgsql-trigger-audit-transition-example"> shows an example.
<xref linkend="plpgsql-trigger-audit-transition-example"/> shows an example.
</para>
<example id="plpgsql-trigger-audit-transition-example">
@ -4280,7 +4280,7 @@ SELECT * FROM sales_summary_bytime;
<para>
This example produces the same results as
<xref linkend="plpgsql-trigger-audit-example">, but instead of using a
<xref linkend="plpgsql-trigger-audit-example"/>, but instead of using a
trigger that fires for every row, it uses a trigger that fires once
per statement, after collecting the relevant information in a transition
table. This can be significantly faster than the row-trigger approach
@ -4383,7 +4383,7 @@ CREATE TRIGGER emp_audit_del
</para>
<para>
<xref linkend="plpgsql-event-trigger-example"> shows an example of an
<xref linkend="plpgsql-event-trigger-example"/> shows an example of an
event trigger procedure in <application>PL/pgSQL</application>.
</para>
@ -4482,7 +4482,7 @@ INSERT INTO dest (col) SELECT foo + bar FROM src;
In the above example, <literal>src.foo</literal> would be an unambiguous reference
to the table column. To create an unambiguous reference to a variable,
declare it in a labeled block and use the block's label
(see <xref linkend="plpgsql-structure">). For example,
(see <xref linkend="plpgsql-structure"/>). For example,
<programlisting>
&lt;&lt;block&gt;&gt;
DECLARE
@ -4575,7 +4575,7 @@ $$ LANGUAGE plpgsql;
to <command>EXECUTE</command> or one of its variants. If you need to
insert a varying value into such a command, do so as part of
constructing the string value, or use <literal>USING</literal>, as illustrated in
<xref linkend="plpgsql-statements-executing-dyn">.
<xref linkend="plpgsql-statements-executing-dyn"/>.
</para>
<para>
@ -4636,7 +4636,7 @@ $$ LANGUAGE plpgsql;
this will happen only if the execution plan is not very sensitive to
the values of the <application>PL/pgSQL</application> variables referenced in it.
If it is, generating a plan each time is a net win. See <xref
linkend="sql-prepare"> for more information about the behavior of
linkend="sql-prepare"/> for more information about the behavior of
prepared statements.
</para>
@ -4796,7 +4796,7 @@ $$ LANGUAGE plpgsql;
easily find yourself needing half a dozen or more adjacent quote marks.
It's recommended that you instead write the function body as a
<quote>dollar-quoted</quote> string literal (see <xref
linkend="sql-syntax-dollar-quoting">). In the dollar-quoting
linkend="sql-syntax-dollar-quoting"/>). In the dollar-quoting
approach, you never double any quote marks, but instead take care to
choose a different dollar-quoting delimiter for each level of
nesting you need. For example, you might write the <command>CREATE
@ -4907,7 +4907,7 @@ a_output := a_output || $$ AND name LIKE 'foobar'$$
accounts for 8 quotation marks) and this is adjacent to the end of that
string constant (2 more). You will probably only need that if
you are writing a function that generates other functions, as in
<xref linkend="plpgsql-porting-ex2">.
<xref linkend="plpgsql-porting-ex2"/>.
For example:
<programlisting>
a_output := a_output || '' if v_'' ||
@ -5029,7 +5029,7 @@ CREATE FUNCTION
to <application>PL/pgSQL</application>'s
<literal>plpgsql.variable_conflict</literal> = <literal>use_column</literal>
behavior, which is not the default,
as explained in <xref linkend="plpgsql-var-subst">.
as explained in <xref linkend="plpgsql-var-subst"/>.
It's often best to avoid such ambiguities in the first place,
but if you have to port a large amount of code that depends on
this behavior, setting <literal>variable_conflict</literal> may be the
@ -5042,7 +5042,7 @@ CREATE FUNCTION
In <productname>PostgreSQL</productname> the function body must be written as
a string literal. Therefore you need to use dollar quoting or escape
single quotes in the function body. (See <xref
linkend="plpgsql-quote-tips">.)
linkend="plpgsql-quote-tips"/>.)
</para>
</listitem>
@ -5080,7 +5080,7 @@ CREATE FUNCTION
from the first number to the second, requiring the loop bounds
to be swapped when porting. This incompatibility is unfortunate
but is unlikely to be changed. (See <xref
linkend="plpgsql-integer-for">.)
linkend="plpgsql-integer-for"/>.)
</para>
</listitem>
@ -5108,7 +5108,7 @@ CREATE FUNCTION
<title>Porting Examples</title>
<para>
<xref linkend="pgsql-porting-ex1"> shows how to port a simple
<xref linkend="pgsql-porting-ex1"/> shows how to port a simple
function from <application>PL/SQL</application> to <application>PL/pgSQL</application>.
</para>
@ -5197,7 +5197,7 @@ $$ LANGUAGE plpgsql;
</example>
<para>
<xref linkend="plpgsql-porting-ex2"> shows how to port a
<xref linkend="plpgsql-porting-ex2"/> shows how to port a
function that creates another function and how to handle the
ensuing quoting problems.
</para>
@ -5292,12 +5292,12 @@ $func$ LANGUAGE plpgsql;
</example>
<para>
<xref linkend="plpgsql-porting-ex3"> shows how to port a function
<xref linkend="plpgsql-porting-ex3"/> shows how to port a function
with <literal>OUT</literal> parameters and string manipulation.
<productname>PostgreSQL</productname> does not have a built-in
<function>instr</function> function, but you can create one
using a combination of other
functions. In <xref linkend="plpgsql-porting-appendix"> there is a
functions. In <xref linkend="plpgsql-porting-appendix"/> there is a
<application>PL/pgSQL</application> implementation of
<function>instr</function> that you can use to make your porting
easier.
@ -5406,7 +5406,7 @@ SELECT * FROM cs_parse_url('http://foobar.com/query.cgi?baz');
</example>
<para>
<xref linkend="plpgsql-porting-ex4"> shows how to port a procedure
<xref linkend="plpgsql-porting-ex4"/> shows how to port a procedure
that uses numerous features that are specific to Oracle.
</para>
@ -5419,14 +5419,14 @@ SELECT * FROM cs_parse_url('http://foobar.com/query.cgi?baz');
<programlisting>
CREATE OR REPLACE PROCEDURE cs_create_job(v_job_id IN INTEGER) IS
a_running_job_count INTEGER;
PRAGMA AUTONOMOUS_TRANSACTION; -- <co id="co.plpgsql-porting-pragma">
PRAGMA AUTONOMOUS_TRANSACTION; -- <co id="co.plpgsql-porting-pragma"/>
BEGIN
LOCK TABLE cs_jobs IN EXCLUSIVE MODE; -- <co id="co.plpgsql-porting-locktable">
LOCK TABLE cs_jobs IN EXCLUSIVE MODE; -- <co id="co.plpgsql-porting-locktable"/>
SELECT count(*) INTO a_running_job_count FROM cs_jobs WHERE end_stamp IS NULL;
IF a_running_job_count &gt; 0 THEN
COMMIT; -- free lock <co id="co.plpgsql-porting-commit">
COMMIT; -- free lock <co id="co.plpgsql-porting-commit"/>
raise_application_error(-20000,
'Unable to create a new job: a job is currently running.');
END IF;
@ -5493,7 +5493,7 @@ BEGIN
SELECT count(*) INTO a_running_job_count FROM cs_jobs WHERE end_stamp IS NULL;
IF a_running_job_count &gt; 0 THEN
RAISE EXCEPTION 'Unable to create a new job: a job is currently running'; -- <co id="co.plpgsql-porting-raise">
RAISE EXCEPTION 'Unable to create a new job: a job is currently running'; -- <co id="co.plpgsql-porting-raise"/>
END IF;
DELETE FROM cs_active_job;
@ -5502,7 +5502,7 @@ BEGIN
BEGIN
INSERT INTO cs_jobs (job_id, start_stamp) VALUES (v_job_id, now());
EXCEPTION
WHEN unique_violation THEN -- <co id="co.plpgsql-porting-exception">
WHEN unique_violation THEN -- <co id="co.plpgsql-porting-exception"/>
-- don't worry if it already exists
END;
END;
@ -5522,7 +5522,7 @@ $$ LANGUAGE plpgsql;
<para>
The exception names supported by <application>PL/pgSQL</application> are
different from Oracle's. The set of built-in exception names
is much larger (see <xref linkend="errcodes-appendix">). There
is much larger (see <xref linkend="errcodes-appendix"/>). There
is not currently a way to declare user-defined exception names,
although you can throw user-chosen SQLSTATE values instead.
</para>
@ -5588,7 +5588,7 @@ END;
<application>PL/SQL</application> version, but you have to remember to use
<function>quote_literal</function> and
<function>quote_ident</function> as described in <xref
linkend="plpgsql-statements-executing-dyn">. Constructs of the
linkend="plpgsql-statements-executing-dyn"/>. Constructs of the
type <literal>EXECUTE 'SELECT * FROM $1';</literal> will not work
reliably unless you use these functions.
</para>
@ -5603,7 +5603,7 @@ END;
the function always returns the same result when given the same
arguments) and <quote>strictness</quote> (whether the function
returns null if any argument is null). Consult the <xref
linkend="sql-createfunction">
linkend="sql-createfunction"/>
reference page for details.
</para>

View File

@ -15,7 +15,7 @@
<para>
To install PL/Python in a particular database, use
<literal>CREATE EXTENSION plpythonu</literal> (but
see also <xref linkend="plpython-python23">).
see also <xref linkend="plpython-python23"/>).
</para>
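Once installed, a function can be written much like the pymax example referenced later in this file (a sketch; the body is ordinary Python, so indentation matters):

CREATE EXTENSION plpythonu;

CREATE FUNCTION pymax (a integer, b integer) RETURNS integer AS $$
  if a > b:
    return a
  return b
$$ LANGUAGE plpythonu;

SELECT pymax(3, 7);   -- returns 7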
<tip>
@ -103,7 +103,7 @@
The built variant depends on which Python version was found during
the installation or which version was explicitly set using
the <envar>PYTHON</envar> environment variable;
see <xref linkend="install-procedure">. To make both variants of
see <xref linkend="install-procedure"/>. To make both variants of
PL/Python available in one installation, the source tree has to be
configured and built twice.
</para>
@ -186,7 +186,7 @@
<para>
Functions in PL/Python are declared via the
standard <xref linkend="sql-createfunction"> syntax:
standard <xref linkend="sql-createfunction"/> syntax:
<programlisting>
CREATE FUNCTION <replaceable>funcname</replaceable> (<replaceable>argument-list</replaceable>)
@ -420,7 +420,7 @@ $$ LANGUAGE plpythonu;
sortas="PL/Python">in PL/Python</secondary></indexterm> is passed to a
function, the argument value will appear as <symbol>None</symbol> in
Python. For example, the function definition of <function>pymax</function>
shown in <xref linkend="plpython-funcs"> will return the wrong answer for null
shown in <xref linkend="plpython-funcs"/> will return the wrong answer for null
inputs. We could add <literal>STRICT</literal> to the function definition
to make <productname>PostgreSQL</productname> do something more reasonable:
if a null value is passed, the function will not be called at all,
@ -774,7 +774,7 @@ SELECT * FROM multiout_simple_setof(3);
<para>
PL/Python also supports anonymous code blocks called with the
<xref linkend="sql-do"> statement:
<xref linkend="sql-do"/> statement:
<programlisting>
DO $$
@ -1056,16 +1056,16 @@ rv = plan.execute(["name"], 5)
<para>
Query parameters and result row fields are converted between PostgreSQL
and Python data types as described in <xref linkend="plpython-data">.
and Python data types as described in <xref linkend="plpython-data"/>.
</para>
<para>
When you prepare a plan using the PL/Python module it is automatically
saved. Read the SPI documentation (<xref linkend="spi">) for a
saved. Read the SPI documentation (<xref linkend="spi"/>) for a
description of what this means. In order to make effective use of this
across function calls one needs to use one of the persistent storage
dictionaries <literal>SD</literal> or <literal>GD</literal> (see
<xref linkend="plpython-sharing">). For example:
<xref linkend="plpython-sharing"/>). For example:
<programlisting>
CREATE FUNCTION usesavedplan() RETURNS trigger AS $$
if "plan" in SD:
@ -1190,7 +1190,7 @@ $$ LANGUAGE plpythonu;
<para>
The actual class of the exception being raised corresponds to the
specific condition that caused the error. Refer
to <xref linkend="errcodes-table"> for a list of possible
to <xref linkend="errcodes-table"/> for a list of possible
conditions. The module
<literal>plpy.spiexceptions</literal> defines an exception class
for each <productname>PostgreSQL</productname> condition, deriving
@ -1241,7 +1241,7 @@ $$ LANGUAGE plpythonu;
<para>
Recovering from errors caused by database access as described in
<xref linkend="plpython-trapping"> can lead to an undesirable
<xref linkend="plpython-trapping"/> can lead to an undesirable
situation where some operations succeed before one of them fails,
and after recovering from that error the data is left in an
inconsistent state. PL/Python offers a solution to this problem in
@ -1391,9 +1391,9 @@ $$ LANGUAGE plpythonu;
The other functions only generate messages of different priority levels.
Whether messages of a particular priority are reported to the client,
written to the server log, or both is controlled by the
<xref linkend="guc-log-min-messages"> and
<xref linkend="guc-client-min-messages"> configuration
variables. See <xref linkend="runtime-config"> for more information.
<xref linkend="guc-log-min-messages"/> and
<xref linkend="guc-client-min-messages"/> configuration
variables. See <xref linkend="runtime-config"/> for more information.
</para>
<para>
@ -1442,9 +1442,9 @@ PL/Python function "raise_custom_exception"
<literal>plpy.quote_nullable(<replaceable>string</replaceable>)</literal>, and
<literal>plpy.quote_ident(<replaceable>string</replaceable>)</literal>. They
are equivalent to the built-in quoting functions described in <xref
linkend="functions-string">. They are useful when constructing
linkend="functions-string"/>. They are useful when constructing
ad-hoc queries. A PL/Python equivalent of dynamic SQL from <xref
linkend="plpgsql-quote-literal-example"> would be:
linkend="plpgsql-quote-literal-example"/> would be:
<programlisting>
plpy.execute("UPDATE tbl SET %s = %s WHERE key = %s" % (
plpy.quote_ident(colname),
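The saved-plan listing above is cut short by the diff context. A minimal sketch of the SD-dictionary pattern being described, with hypothetical table, column, and function names, could look like:

CREATE FUNCTION count_matching(val integer) RETURNS integer AS $$
# prepare once per session and cache the plan in the SD dictionary
if "count_plan" not in SD:
    SD["count_plan"] = plpy.prepare(
        "SELECT count(*) AS n FROM mytable WHERE col = $1", ["integer"])
rv = plpy.execute(SD["count_plan"], [val])
return rv[0]["n"]
$$ LANGUAGE plpythonu;

Later calls in the same session skip the prepare step and reuse the cached plan.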

View File

@ -79,7 +79,7 @@
<para>
To create a function in the <application>PL/Tcl</application> language, use
the standard <xref linkend="sql-createfunction"> syntax:
the standard <xref linkend="sql-createfunction"/> syntax:
<programlisting>
CREATE FUNCTION <replaceable>funcname</replaceable> (<replaceable>argument-types</replaceable>) RETURNS <replaceable>return-type</replaceable> AS $$
@ -483,7 +483,7 @@ $$ LANGUAGE pltcl;
executed within a SQL subtransaction. If the script returns an
error, that entire subtransaction is rolled back before returning the
error out to the surrounding Tcl code.
See <xref linkend="pltcl-subtransactions"> for more details and an
See <xref linkend="pltcl-subtransactions"/> for more details and an
example.
</para>
</listitem>
@ -559,10 +559,10 @@ SELECT 'doesn''t' AS ret
priority levels.
Whether messages of a particular priority are reported to the client,
written to the server log, or both is controlled by the
<xref linkend="guc-log-min-messages"> and
<xref linkend="guc-client-min-messages"> configuration
variables. See <xref linkend="runtime-config">
and <xref linkend="pltcl-error-handling">
<xref linkend="guc-log-min-messages"/> and
<xref linkend="guc-client-min-messages"/> configuration
variables. See <xref linkend="runtime-config"/>
and <xref linkend="pltcl-error-handling"/>
for more information.
</para>
</listitem>
@ -888,7 +888,7 @@ CREATE EVENT TRIGGER tcl_a_snitch ON ddl_command_start EXECUTE PROCEDURE tclsnit
Fields <varname>SQLSTATE</varname>, <varname>condition</varname>,
and <varname>message</varname> are always supplied
(the first two represent the error code and condition name as shown
in <xref linkend="errcodes-appendix">).
in <xref linkend="errcodes-appendix"/>).
Fields that may be present include
<varname>detail</varname>, <varname>hint</varname>, <varname>context</varname>,
<varname>schema</varname>, <varname>table</varname>, <varname>column</varname>,
@ -929,7 +929,7 @@ if {[catch { spi_exec $sql_command }]} {
<para>
Recovering from errors caused by database access as described in
<xref linkend="pltcl-error-handling"> can lead to an undesirable
<xref linkend="pltcl-error-handling"/> can lead to an undesirable
situation where some operations succeed before one of them fails,
and after recovering from that error the data is left in an
inconsistent state. PL/Tcl offers a solution to this problem in

View File

@ -15,7 +15,7 @@
<para>
The functionality provided by this module overlaps substantially
with the functionality of the older <xref linkend="dblink"> module.
with the functionality of the older <xref linkend="dblink"/> module.
But <filename>postgres_fdw</filename> provides more transparent and
standards-compliant syntax for accessing remote tables, and can give
better performance in many cases.
@ -27,12 +27,12 @@
<listitem>
<para>
Install the <filename>postgres_fdw</filename> extension using <xref
linkend="sql-createextension">.
linkend="sql-createextension"/>.
</para>
</listitem>
<listitem>
<para>
Create a foreign server object, using <xref linkend="sql-createserver">,
Create a foreign server object, using <xref linkend="sql-createserver"/>,
to represent each remote database you want to connect to.
Specify connection information, except <literal>user</literal> and
<literal>password</literal>, as options of the server object.
@ -40,7 +40,7 @@
</listitem>
<listitem>
<para>
Create a user mapping, using <xref linkend="sql-createusermapping">, for
Create a user mapping, using <xref linkend="sql-createusermapping"/>, for
each database user you want to allow to access each foreign server.
Specify the remote user name and password to use as
<literal>user</literal> and <literal>password</literal> options of the
@ -49,8 +49,8 @@
</listitem>
<listitem>
<para>
Create a foreign table, using <xref linkend="sql-createforeigntable">
or <xref linkend="sql-importforeignschema">,
Create a foreign table, using <xref linkend="sql-createforeigntable"/>
or <xref linkend="sql-importforeignschema"/>,
for each remote table you want to access. The columns of the foreign
table must match the referenced remote table. You can, however, use
table and/or column names different from the remote table's, if you
@ -101,7 +101,7 @@
<para>
A foreign server using the <filename>postgres_fdw</filename> foreign data wrapper
can have the same options that <application>libpq</application> accepts in
connection strings, as described in <xref linkend="libpq-paramkeywords">,
connection strings, as described in <xref linkend="libpq-paramkeywords"/>,
except that these options are not allowed:
<itemizedlist spacing="compact">
@ -254,7 +254,7 @@
<literal>fdw_tuple_cost</literal> to the cost estimates. This local
estimation is unlikely to be very accurate unless local copies of the
remote table's statistics are available. Running
<xref linkend="sql-analyze"> on the foreign table is the way to update
<xref linkend="sql-analyze"/> on the foreign table is the way to update
the local statistics; this will perform a scan of the remote table and
then calculate and store statistics just as though the table were local.
Keeping local statistics can be a useful way to reduce per-query planning
@ -359,7 +359,7 @@
<para>
<filename>postgres_fdw</filename> is able to import foreign table definitions
using <xref linkend="sql-importforeignschema">. This command creates
using <xref linkend="sql-importforeignschema"/>. This command creates
foreign table definitions on the local server that match tables or
views present on the remote server. If the remote tables to be imported
have columns of user-defined data types, the local server must have
@ -423,7 +423,7 @@
So if you wish to import <literal>CHECK</literal> constraints, you must do so
manually, and you should verify the semantics of each one carefully.
For more detail about the treatment of <literal>CHECK</literal> constraints on
foreign tables, see <xref linkend="sql-createforeigntable">.
foreign tables, see <xref linkend="sql-createforeigntable"/>.
</para>
<para>
@ -528,7 +528,7 @@
<para>
In the remote sessions opened by <filename>postgres_fdw</filename>,
the <xref linkend="guc-search-path"> parameter is set to
the <xref linkend="guc-search-path"/> parameter is set to
just <literal>pg_catalog</literal>, so that only built-in objects are visible
without schema qualification. This is not an issue for queries
generated by <filename>postgres_fdw</filename> itself, because it always
@ -538,7 +538,7 @@
any functions used in that view will be executed with the restricted
search path. It is recommended to schema-qualify all names in such
functions, or else attach <literal>SET search_path</literal> options
(see <xref linkend="sql-createfunction">) to such functions
(see <xref linkend="sql-createfunction"/>) to such functions
to establish their expected search path environment.
</para>
@ -548,22 +548,22 @@
<itemizedlist spacing="compact">
<listitem>
<para>
<xref linkend="guc-timezone"> is set to <literal>UTC</literal>
<xref linkend="guc-timezone"/> is set to <literal>UTC</literal>
</para>
</listitem>
<listitem>
<para>
<xref linkend="guc-datestyle"> is set to <literal>ISO</literal>
<xref linkend="guc-datestyle"/> is set to <literal>ISO</literal>
</para>
</listitem>
<listitem>
<para>
<xref linkend="guc-intervalstyle"> is set to <literal>postgres</literal>
<xref linkend="guc-intervalstyle"/> is set to <literal>postgres</literal>
</para>
</listitem>
<listitem>
<para>
<xref linkend="guc-extra-float-digits"> is set to <literal>3</literal> for remote
<xref linkend="guc-extra-float-digits"/> is set to <literal>3</literal> for remote
servers 9.0 and newer and is set to <literal>2</literal> for older versions
</para>
</listitem>
@ -612,7 +612,7 @@ CREATE EXTENSION postgres_fdw;
</programlisting>
<para>
Then create a foreign server using <xref linkend="sql-createserver">.
Then create a foreign server using <xref linkend="sql-createserver"/>.
In this example we wish to connect to a <productname>PostgreSQL</productname> server
on host <literal>192.83.123.89</literal> listening on
port <literal>5432</literal>. The database to which the connection is made
@ -626,7 +626,7 @@ CREATE SERVER foreign_server
</para>
<para>
A user mapping, defined with <xref linkend="sql-createusermapping">, is
A user mapping, defined with <xref linkend="sql-createusermapping"/>, is
needed as well to identify the role that will be used on the remote
server:
@ -639,7 +639,7 @@ CREATE USER MAPPING FOR local_user
<para>
Now it is possible to create a foreign table with
<xref linkend="sql-createforeigntable">. In this example we
<xref linkend="sql-createforeigntable"/>. In this example we
wish to access the table named <structname>some_schema.some_table</structname>
on the remote server. The local name for it will
be <structname>foreign_table</structname>:
@ -658,7 +658,7 @@ CREATE FOREIGN TABLE foreign_table (
Column names must match as well, unless you attach <literal>column_name</literal>
options to the individual columns to show how they are named in the remote
table.
In many cases, use of <xref linkend="sql-importforeignschema"> is
In many cases, use of <xref linkend="sql-importforeignschema"/> is
preferable to constructing foreign table definitions manually.
</para>
</sect2>
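The setup steps above appear piecemeal across several hunks; taken together, the sequence the section describes is roughly the following sketch (host, credentials, schema, and table names are hypothetical):

CREATE EXTENSION postgres_fdw;

CREATE SERVER foreign_server
    FOREIGN DATA WRAPPER postgres_fdw
    OPTIONS (host '192.83.123.89', port '5432', dbname 'foreign_db');

CREATE USER MAPPING FOR local_user
    SERVER foreign_server
    OPTIONS (user 'foreign_user', password 'password');

CREATE FOREIGN TABLE foreign_table (
    id integer NOT NULL,
    data text
)
    SERVER foreign_server
    OPTIONS (schema_name 'some_schema', table_name 'some_table');

After that, foreign_table can be queried like any local table, and IMPORT FOREIGN SCHEMA can replace the manual CREATE FOREIGN TABLE step.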

View File

@ -1,6 +1,8 @@
<!-- doc/src/sgml/postgres.sgml -->
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook V4.2//EN" [
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[
<!ENTITY % version SYSTEM "version.sgml">
%version;
@ -42,11 +44,11 @@
<para>
After you have worked through this tutorial you might want to move
on to reading <xref linkend="sql"> to gain a more formal knowledge
of the SQL language, or <xref linkend="client-interfaces"> for
on to reading <xref linkend="sql"/> to gain a more formal knowledge
of the SQL language, or <xref linkend="client-interfaces"/> for
information about developing applications for
<productname>PostgreSQL</productname>. Those who set up and
manage their own server should also read <xref linkend="admin">.
manage their own server should also read <xref linkend="admin"/>.
</para>
</partintro>
@ -80,14 +82,14 @@
chapters individually as they choose. The information in this
part is presented in a narrative fashion in topical units.
Readers looking for a complete description of a particular command
should see <xref linkend="reference">.
should see <xref linkend="reference"/>.
</para>
<para>
Readers of this part should know how to connect to a
<productname>PostgreSQL</productname> database and issue
<acronym>SQL</acronym> commands. Readers that are unfamiliar with
these issues are encouraged to read <xref linkend="tutorial">
these issues are encouraged to read <xref linkend="tutorial"/>
first. <acronym>SQL</acronym> commands are typically entered
using the <productname>PostgreSQL</productname> interactive terminal
<application>psql</application>, but other programs that have
@ -130,7 +132,7 @@
self-contained and can be read individually as desired. The
information in this part is presented in a narrative fashion in
topical units. Readers looking for a complete description of a
particular command should see <xref linkend="reference">.
particular command should see <xref linkend="reference"/>.
</para>
<para>
@ -140,8 +142,8 @@
The rest of this part is about tuning and management; that material
assumes that the reader is familiar with the general use of
the <productname>PostgreSQL</productname> database system. Readers are
encouraged to look at <xref linkend="tutorial"> and <xref
linkend="sql"> for additional information.
encouraged to look at <xref linkend="tutorial"/> and <xref
linkend="sql"/> for additional information.
</para>
</partintro>
@ -174,10 +176,10 @@
with <productname>PostgreSQL</productname>. Each of these chapters can be
read independently. Note that there are many other programming
interfaces for client programs that are distributed separately and
contain their own documentation (<xref linkend="external-projects">
contain their own documentation (<xref linkend="external-projects"/>
lists some of the more popular ones). Readers of this part should be
familiar with using <acronym>SQL</acronym> commands to manipulate
and query the database (see <xref linkend="sql">) and of course
and query the database (see <xref linkend="sql"/>) and of course
with the programming language that the interface uses.
</para>
</partintro>
@ -203,7 +205,7 @@
<productname>PostgreSQL</productname> distribution as well as
general issues concerning server-side programming languages. It
is essential to read at least the earlier sections of <xref
linkend="extend"> (covering functions) before diving into the
linkend="extend"/> (covering functions) before diving into the
material about server-side programming languages.
</para>
</partintro>

View File

@ -170,7 +170,7 @@
form of the message. In <application>psql</application>, say <literal>\set
VERBOSITY verbose</literal> beforehand. If you are extracting the message
from the server log, set the run-time parameter
<xref linkend="guc-log-error-verbosity"> to <literal>verbose</literal> so that all
<xref linkend="guc-log-error-verbosity"/> to <literal>verbose</literal> so that all
details are logged.
</para>
</note>
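Concretely, the two ways of obtaining the verbose message form mentioned here are, assuming superuser access for the server-log variant:

-- in psql, before reproducing the problem:
\set VERBOSITY verbose

-- to get verbose detail into the server log (takes effect after a reload):
ALTER SYSTEM SET log_error_verbosity = 'verbose';
SELECT pg_reload_conf();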

View File

@ -207,7 +207,7 @@
<para>
This section describes the message flow and the semantics of each
message type. (Details of the exact representation of each message
appear in <xref linkend="protocol-message-formats">.) There are
appear in <xref linkend="protocol-message-formats"/>.) There are
several different sub-protocols depending on the state of the
connection: start-up, query, function call,
<command>COPY</command>, and termination. There are also special
@ -383,7 +383,7 @@
SASLInitialResponse with the name of the selected mechanism, and the
first part of the SASL data stream in response to this. If further
messages are needed, the server will respond with
AuthenticationSASLContinue. See <xref linkend="sasl-authentication">
AuthenticationSASLContinue. See <xref linkend="sasl-authentication"/>
for details.
</para>
</listitem>
@ -478,9 +478,9 @@
<para>
This message informs the frontend about the current (initial)
setting of backend parameters, such as <xref
linkend="guc-client-encoding"> or <xref linkend="guc-datestyle">.
linkend="guc-client-encoding"/> or <xref linkend="guc-datestyle"/>.
The frontend can ignore this message, or record the settings
for its future use; see <xref linkend="protocol-async"> for
for its future use; see <xref linkend="protocol-async"/> for
more details. The frontend should not respond to this
message, but should continue listening for a ReadyForQuery
message.
@ -564,7 +564,7 @@
<listitem>
<para>
The backend is ready to copy data from the frontend to a
table; see <xref linkend="protocol-copy">.
table; see <xref linkend="protocol-copy"/>.
</para>
</listitem>
</varlistentry>
@ -574,7 +574,7 @@
<listitem>
<para>
The backend is ready to copy data from a table to the
frontend; see <xref linkend="protocol-copy">.
frontend; see <xref linkend="protocol-copy"/>.
</para>
</listitem>
</varlistentry>
@ -654,7 +654,7 @@
normally consists of RowDescription, zero or more
DataRow messages, and then CommandComplete.
<command>COPY</command> to or from the frontend invokes special protocol
as described in <xref linkend="protocol-copy">.
as described in <xref linkend="protocol-copy"/>.
All other query types normally produce only
a CommandComplete message.
</para>
@ -691,7 +691,7 @@
<para>
A frontend must be prepared to accept ErrorResponse and
NoticeResponse messages whenever it is expecting any other type of
message. See also <xref linkend="protocol-async"> concerning messages
message. See also <xref linkend="protocol-async"/> concerning messages
that the backend might generate due to outside events.
</para>
@ -1198,7 +1198,7 @@ SELCT 1/0;
It is possible for NoticeResponse and ParameterStatus messages to be
interspersed between CopyData messages; frontends must handle these cases,
and should be prepared for other asynchronous message types as well (see
<xref linkend="protocol-async">). Otherwise, any message type other than
<xref linkend="protocol-async"/>). Otherwise, any message type other than
CopyData or CopyDone may be treated as terminating copy-out mode.
</para>
@ -1221,7 +1221,7 @@ SELCT 1/0;
until a Sync message is received, and then issue ReadyForQuery and return
to normal processing. The frontend should treat receipt of ErrorResponse
as terminating the copy in both directions; no CopyDone should be sent
in this case. See <xref linkend="protocol-replication"> for more
in this case. See <xref linkend="protocol-replication"/> for more
information on the subprotocol transmitted over copy-both mode.
</para>
@ -1435,7 +1435,7 @@ SELCT 1/0;
communication security in environments where attackers might be
able to capture the session traffic. For more information on
encrypting <productname>PostgreSQL</productname> sessions with
<acronym>SSL</acronym>, see <xref linkend="ssl-tcp">.
<acronym>SSL</acronym>, see <xref linkend="ssl-tcp"/>.
</para>
<para>
@ -1635,7 +1635,7 @@ of <literal>true</literal> tells the backend to go into walsender mode, wherein
small set of replication commands can be issued instead of SQL statements. Only
the simple query protocol can be used in walsender mode.
Replication commands are logged in the server log when
<xref linkend="guc-log-replication-commands"> is enabled.
<xref linkend="guc-log-replication-commands"/> is enabled.
Passing <literal>database</literal> as the value instructs walsender to connect to
the database specified in the <literal>dbname</literal> parameter, which will allow
the connection to be used for logical replication from that database.
@ -1649,8 +1649,8 @@ the connection to be used for logical replication from that database.
psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;"
</programlisting>
However, it is often more useful to use
<xref linkend="app-pgreceivewal"> (for physical replication) or
<xref linkend="app-pgrecvlogical"> (for logical replication).
<xref linkend="app-pgreceivewal"/> (for physical replication) or
<xref linkend="app-pgrecvlogical"/> (for logical replication).
</para>
<para>
@ -1728,7 +1728,7 @@ The commands accepted in walsender mode are:
<listitem>
<para>
Requests the server to send the current setting of a run-time parameter.
This is similar to the SQL command <xref linkend="sql-show">.
This is similar to the SQL command <xref linkend="sql-show"/>.
</para>
<variablelist>
@ -1737,7 +1737,7 @@ The commands accepted in walsender mode are:
<listitem>
<para>
The name of a run-time parameter. Available parameters are documented
in <xref linkend="runtime-config">.
in <xref linkend="runtime-config"/>.
</para>
</listitem>
</varlistentry>
@ -1792,7 +1792,7 @@ The commands accepted in walsender mode are:
<listitem>
<para>
Create a physical or logical replication
slot. See <xref linkend="streaming-replication-slots"> for more about
slot. See <xref linkend="streaming-replication-slots"/> for more about
replication slots.
</para>
<variablelist>
@ -1801,7 +1801,7 @@ The commands accepted in walsender mode are:
<listitem>
<para>
The name of the slot to create. Must be a valid replication slot
name (see <xref linkend="streaming-replication-slots-manipulation">).
name (see <xref linkend="streaming-replication-slots-manipulation"/>).
</para>
</listitem>
</varlistentry>
@ -1811,7 +1811,7 @@ The commands accepted in walsender mode are:
<listitem>
<para>
The name of the output plugin used for logical decoding
(see <xref linkend="logicaldecoding-output-plugin">).
(see <xref linkend="logicaldecoding-output-plugin"/>).
</para>
</listitem>
</varlistentry>
@ -2378,7 +2378,7 @@ The commands accepted in walsender mode are:
Sets the label of the backup. If none is specified, a backup label
of <literal>base backup</literal> will be used. The quoting rules
for the label are the same as a standard SQL string with
<xref linkend="guc-standard-conforming-strings"> turned on.
<xref linkend="guc-standard-conforming-strings"/> turned on.
</para>
</listitem>
</varlistentry>
@ -2642,7 +2642,7 @@ The commands accepted in walsender mode are:
<para>
The individual protocol messages are discussed in the following
subsections. Individual messages are described in
<xref linkend="protocol-logicalrep-message-formats">.
<xref linkend="protocol-logicalrep-message-formats"/>.
</para>
<para>
@ -4006,7 +4006,7 @@ CopyInResponse (B)
characters, etc).
1 indicates the overall copy format is binary (similar
to DataRow format).
See <xref linkend="sql-copy">
See <xref linkend="sql-copy"/>
for more information.
</para>
</listitem>
@ -4080,7 +4080,7 @@ CopyOutResponse (B)
is textual (rows separated by newlines, columns
separated by separator characters, etc). 1 indicates
the overall copy format is binary (similar to DataRow
format). See <xref linkend="sql-copy"> for more information.
format). See <xref linkend="sql-copy"/> for more information.
</para>
</listitem>
</varlistentry>
@ -4153,7 +4153,7 @@ CopyBothResponse (B)
is textual (rows separated by newlines, columns
separated by separator characters, etc). 1 indicates
the overall copy format is binary (similar to DataRow
format). See <xref linkend="sql-copy"> for more information.
format). See <xref linkend="sql-copy"/> for more information.
</para>
</listitem>
</varlistentry>
@ -4394,7 +4394,7 @@ ErrorResponse (B)
A code identifying the field type; if zero, this is
the message terminator and no string follows.
The presently defined field types are listed in
<xref linkend="protocol-error-fields">.
<xref linkend="protocol-error-fields"/>.
Since more field types might be added in future,
frontends should silently ignore fields of unrecognized
type.
@ -4886,7 +4886,7 @@ NoticeResponse (B)
A code identifying the field type; if zero, this is
the message terminator and no string follows.
The presently defined field types are listed in
<xref linkend="protocol-error-fields">.
<xref linkend="protocol-error-fields"/>.
Since more field types might be added in future,
frontends should silently ignore fields of unrecognized
type.
@ -5757,7 +5757,7 @@ StartupMessage (F)
<literal>true</literal>, <literal>false</literal>, or
<literal>database</literal>, and the default is
<literal>false</literal>. See
<xref linkend="protocol-replication"> for details.
<xref linkend="protocol-replication"/> for details.
</para>
</listitem>
</varlistentry>
@ -5919,7 +5919,7 @@ message.
<listitem>
<para>
Code: the SQLSTATE code for the error (see <xref
linkend="errcodes-appendix">). Not localizable. Always present.
linkend="errcodes-appendix"/>). Not localizable. Always present.
</para>
</listitem>
</varlistentry>
@ -6124,7 +6124,7 @@ message.
<para>
The fields for schema name, table name, column name, data type name, and
constraint name are supplied only for a limited number of error types;
see <xref linkend="errcodes-appendix">. Frontends should not assume that
see <xref linkend="errcodes-appendix"/>. Frontends should not assume that
the presence of any of these fields guarantees the presence of another
field. Core error sources observe the interrelationships noted above, but
user-defined functions may use these fields in other ways. In the same
@ -6149,7 +6149,7 @@ not line breaks.
This section describes the detailed format of each logical replication message.
These messages are returned either by the replication slot SQL interface or are
sent by a walsender. In the case of a walsender, they are encapsulated inside the replication
protocol WAL messages as described in <xref linkend="protocol-replication">
protocol WAL messages as described in <xref linkend="protocol-replication"/>
and generally obey the same message flow as physical replication.
</para>
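As a concrete illustration of the walsender commands discussed above, a session opened over a replication connection (for example psql "dbname=postgres replication=database") could issue the following, with a hypothetical slot name:

IDENTIFY_SYSTEM;
SHOW wal_level;
CREATE_REPLICATION_SLOT my_slot LOGICAL pgoutput;

The results come back through the ordinary query-result messages described earlier in this file.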

View File

@ -24,7 +24,7 @@
<para>
The process of retrieving or the command to retrieve data from a
database is called a <firstterm>query</firstterm>. In SQL the
<xref linkend="sql-select"> command is
<xref linkend="sql-select"/> command is
used to specify queries. The general syntax of the
<command>SELECT</command> command is
<synopsis>
@ -59,7 +59,7 @@ SELECT a, b + c FROM table1;
</programlisting>
(assuming that <literal>b</literal> and <literal>c</literal> are of a numerical
data type).
See <xref linkend="queries-select-lists"> for more details.
See <xref linkend="queries-select-lists"/> for more details.
</para>
<para>
@ -110,7 +110,7 @@ SELECT random();
<title>The <literal>FROM</literal> Clause</title>
<para>
The <xref linkend="sql-from" endterm="sql-from-title"> derives a
The <xref linkend="sql-from" endterm="sql-from-title"/> derives a
table from one or more other tables given in a comma-separated
table reference list.
<synopsis>
@ -589,7 +589,7 @@ SELECT * FROM my_table AS m WHERE my_table.a &gt; 5; -- wrong
SELECT * FROM people AS mother JOIN people AS child ON mother.id = child.mother_id;
</programlisting>
Additionally, an alias is required if the table reference is a
subquery (see <xref linkend="queries-subqueries">).
subquery (see <xref linkend="queries-subqueries"/>).
</para>
<para>
@ -640,7 +640,7 @@ SELECT a.* FROM (my_table AS a JOIN your_table AS b ON ...) AS c
<para>
Subqueries specifying a derived table must be enclosed in
parentheses and <emphasis>must</emphasis> be assigned a table
alias name (as in <xref linkend="queries-table-aliases">). For
alias name (as in <xref linkend="queries-table-aliases"/>). For
example:
<programlisting>
FROM (SELECT * FROM table1) AS alias_name
@ -662,7 +662,7 @@ FROM (VALUES ('anne', 'smith'), ('bob', 'jones'), ('joe', 'blow'))
</programlisting>
Again, a table alias is required. Assigning alias names to the columns
of the <command>VALUES</command> list is optional, but is good practice.
For more information see <xref linkend="queries-values">.
For more information see <xref linkend="queries-values"/>.
</para>
</sect3>
@ -713,7 +713,7 @@ ROWS FROM( <replaceable>function_call</replaceable> <optional>, ... </optional>
The special table function <literal>UNNEST</literal> may be called with
any number of array parameters, and it returns a corresponding number of
columns, as if <literal>UNNEST</literal>
(<xref linkend="functions-array">) had been called on each parameter
(<xref linkend="functions-array"/>) had been called on each parameter
separately and combined using the <literal>ROWS FROM</literal> construct.
</para>
@ -795,8 +795,8 @@ SELECT *
AS t1(proname name, prosrc text)
WHERE proname LIKE 'bytea%';
</programlisting>
The <xref linkend="contrib-dblink-function"> function
(part of the <xref linkend="dblink"> module) executes
The <xref linkend="contrib-dblink-function"/> function
(part of the <xref linkend="dblink"/> module) executes
a remote query. It is declared to return
<type>record</type> since it might be used for any kind of query.
The actual column set must be specified in the calling query so
@ -908,12 +908,12 @@ WHERE pname IS NULL;
<para>
The syntax of the <xref linkend="sql-where"
endterm="sql-where-title"> is
endterm="sql-where-title"/> is
<synopsis>
WHERE <replaceable>search_condition</replaceable>
</synopsis>
where <replaceable>search_condition</replaceable> is any value
expression (see <xref linkend="sql-expressions">) that
expression (see <xref linkend="sql-expressions"/>) that
returns a value of type <type>boolean</type>.
</para>
@ -1014,7 +1014,7 @@ SELECT <replaceable>select_list</replaceable>
</synopsis>
<para>
The <xref linkend="sql-groupby" endterm="sql-groupby-title"> is
The <xref linkend="sql-groupby" endterm="sql-groupby-title"/> is
used to group together those rows in a table that have the same
values in all the columns listed. The order in which the columns
are listed does not matter. The effect is to combine each set
@ -1066,7 +1066,7 @@ SELECT <replaceable>select_list</replaceable>
Here <literal>sum</literal> is an aggregate function that
computes a single value over the entire group. More information
about the available aggregate functions can be found in <xref
linkend="functions-aggregate">.
linkend="functions-aggregate"/>.
</para>
<tip>
@ -1074,7 +1074,7 @@ SELECT <replaceable>select_list</replaceable>
Grouping without aggregate expressions effectively calculates the
set of distinct values in a column. This can also be achieved
using the <literal>DISTINCT</literal> clause (see <xref
linkend="queries-distinct">).
linkend="queries-distinct"/>).
</para>
</tip>
@ -1236,7 +1236,7 @@ SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit
References to the grouping columns or expressions are replaced
by null values in result rows for grouping sets in which those
columns do not appear. To distinguish which grouping a particular output
row resulted from, see <xref linkend="functions-grouping-table">.
row resulted from, see <xref linkend="functions-grouping-table"/>.
</para>
<para>
@ -1366,9 +1366,9 @@ GROUP BY GROUPING SETS (
<para>
If the query contains any window functions (see
<xref linkend="tutorial-window">,
<xref linkend="functions-window"> and
<xref linkend="syntax-window-functions">), these functions are evaluated
<xref linkend="tutorial-window"/>,
<xref linkend="functions-window"/> and
<xref linkend="syntax-window-functions"/>), these functions are evaluated
after any grouping, aggregation, and <literal>HAVING</literal> filtering is
performed. That is, if the query uses any aggregates, <literal>GROUP
BY</literal>, or <literal>HAVING</literal>, then the rows seen by the window functions
@ -1430,7 +1430,7 @@ GROUP BY GROUPING SETS (
The simplest kind of select list is <literal>*</literal> which
emits all columns that the table expression produces. Otherwise,
a select list is a comma-separated list of value expressions (as
defined in <xref linkend="sql-expressions">). For instance, it
defined in <xref linkend="sql-expressions"/>). For instance, it
could be a list of column names:
<programlisting>
SELECT a, b, c FROM ...
@ -1438,7 +1438,7 @@ SELECT a, b, c FROM ...
The column names <literal>a</literal>, <literal>b</literal>, and <literal>c</literal>
are either the actual names of the columns of tables referenced
in the <literal>FROM</literal> clause, or the aliases given to them as
explained in <xref linkend="queries-table-aliases">. The name
explained in <xref linkend="queries-table-aliases"/>. The name
space available in the select list is the same as in the
<literal>WHERE</literal> clause, unless grouping is used, in which case
it is the same as in the <literal>HAVING</literal> clause.
@ -1455,7 +1455,7 @@ SELECT tbl1.a, tbl2.a, tbl1.b FROM ...
<programlisting>
SELECT tbl1.*, tbl2.a FROM ...
</programlisting>
See <xref linkend="rowtypes-usage"> for more about
See <xref linkend="rowtypes-usage"/> for more about
the <replaceable>table_name</replaceable><literal>.*</literal> notation.
</para>
@ -1499,7 +1499,7 @@ SELECT a AS value, b + c AS sum FROM ...
The <literal>AS</literal> keyword is optional, but only if the new column
name does not match any
<productname>PostgreSQL</productname> keyword (see <xref
linkend="sql-keywords-appendix">). To avoid an accidental match to
linkend="sql-keywords-appendix"/>). To avoid an accidental match to
a keyword, you can double-quote the column name. For example,
<literal>VALUE</literal> is a keyword, so this does not work:
<programlisting>
@ -1518,7 +1518,7 @@ SELECT a "value", b + c AS sum FROM ...
<para>
The naming of output columns here is different from that done in
the <literal>FROM</literal> clause (see <xref
linkend="queries-table-aliases">). It is possible
linkend="queries-table-aliases"/>). It is possible
to rename the same column twice, but the name assigned in
the select list is the one that will be passed on.
</para>
@ -1663,7 +1663,7 @@ SELECT DISTINCT ON (<replaceable>expression</replaceable> <optional>, <replaceab
queries, the two queries must be <quote>union compatible</quote>,
which means that they return the same number of columns and
the corresponding columns have compatible data types, as
described in <xref linkend="typeconv-union-case">.
described in <xref linkend="typeconv-union-case"/>.
</para>
</sect1>
@ -1861,7 +1861,7 @@ VALUES ( <replaceable class="parameter">expression</replaceable> [, ...] ) [, ..
of columns in the table), and corresponding entries in each list must
have compatible data types. The actual data type assigned to each column
of the result is determined using the same rules as for <literal>UNION</literal>
(see <xref linkend="typeconv-union-case">).
(see <xref linkend="typeconv-union-case"/>).
</para>
<para>
@ -1912,7 +1912,7 @@ SELECT <replaceable>select_list</replaceable> FROM <replaceable>table_expression
</para>
<para>
For more information see <xref linkend="sql-values">.
For more information see <xref linkend="sql-values"/>.
</para>
</sect1>
@ -2261,7 +2261,7 @@ SELECT * FROM moved_rows;
<para>
Data-modifying statements in <literal>WITH</literal> usually have
<literal>RETURNING</literal> clauses (see <xref linkend="dml-returning">),
<literal>RETURNING</literal> clauses (see <xref linkend="dml-returning"/>),
as shown in the example above.
It is the output of the <literal>RETURNING</literal> clause, <emphasis>not</emphasis> the
target table of the data-modifying statement, that forms the temporary
@ -2317,7 +2317,7 @@ DELETE FROM parts
each other and with the main query. Therefore, when using data-modifying
statements in <literal>WITH</literal>, the order in which the specified updates
actually happen is unpredictable. All the statements are executed with
the same <firstterm>snapshot</firstterm> (see <xref linkend="mvcc">), so they
the same <firstterm>snapshot</firstterm> (see <xref linkend="mvcc"/>), so they
cannot <quote>see</quote> one another's effects on the target tables. This
alleviates the effects of the unpredictability of the actual order of row
updates, and means that <literal>RETURNING</literal> data is the only way to
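The hunk above is cut off mid-sentence, but the point it is making, that the outer query sees the RETURNING output rather than the modified table, can be made concrete with a small sketch using hypothetical tables:

WITH moved_rows AS (
    DELETE FROM products
    WHERE date_sold < '2016-01-01'
    RETURNING *
)
INSERT INTO products_log
SELECT * FROM moved_rows;

The INSERT receives exactly the rows the DELETE reported through RETURNING, not the post-delete contents of products.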

View File

@ -12,7 +12,7 @@
tutorial is only intended to give you an introduction and is in no
way a complete tutorial on <acronym>SQL</acronym>. Numerous books
have been written on <acronym>SQL</acronym>, including <xref
linkend="melt93"> and <xref linkend="date97">.
linkend="melt93"/> and <xref linkend="date97"/>.
You should be aware that some <productname>PostgreSQL</productname>
language features are extensions to the standard.
</para>
@ -267,7 +267,7 @@ COPY weather FROM '/home/user/weather.txt';
where the file name for the source file must be available on the
machine running the backend process, not the client, since the backend process
reads the file directly. You can read more about the
<command>COPY</command> command in <xref linkend="sql-copy">.
<command>COPY</command> command in <xref linkend="sql-copy"/>.
</para>
</sect1>
@ -754,7 +754,7 @@ SELECT city, max(temp_lo)
<programlisting>
SELECT city, max(temp_lo)
FROM weather
WHERE city LIKE 'S%' -- <co id="co.tutorial-agg-like">
WHERE city LIKE 'S%' -- <co id="co.tutorial-agg-like"/>
GROUP BY city
HAVING max(temp_lo) &lt; 40;
</programlisting>
@ -762,7 +762,7 @@ SELECT city, max(temp_lo)
<callout arearefs="co.tutorial-agg-like">
<para>
The <literal>LIKE</literal> operator does pattern matching and
is explained in <xref linkend="functions-matching">.
is explained in <xref linkend="functions-matching"/>.
</para>
</callout>
</calloutlist>

View File

@ -65,7 +65,7 @@
</listitem>
</itemizedlist>
In addition, you can define your own range types;
see <xref linkend="sql-createtype"> for more information.
see <xref linkend="sql-createtype"/> for more information.
</para>
</sect2>
@ -94,8 +94,8 @@ SELECT int4range(10, 20) * int4range(15, 25);
SELECT isempty(numrange(1, 5));
</programlisting>
See <xref linkend="range-operators-table">
and <xref linkend="range-functions-table"> for complete lists of
See <xref linkend="range-operators-table"/>
and <xref linkend="range-functions-table"/> for complete lists of
operators and functions on range types.
</para>
</sect2>
@ -117,7 +117,7 @@ SELECT isempty(numrange(1, 5));
represented by <quote><literal>(</literal></quote>. Likewise, an inclusive upper bound is represented by
<quote><literal>]</literal></quote>, while an exclusive upper bound is
represented by <quote><literal>)</literal></quote>.
(See <xref linkend="rangetypes-io"> for more details.)
(See <xref linkend="rangetypes-io"/> for more details.)
</para>
<para>
@ -214,7 +214,7 @@ empty
<note>
<para>
These rules are very similar to those for writing field values in
composite-type literals. See <xref linkend="rowtypes-io-syntax"> for
composite-type literals. See <xref linkend="rowtypes-io-syntax"/> for
additional commentary.
</para>
</note>
@ -406,7 +406,7 @@ SELECT '[11:10, 23:00]'::timerange;
</programlisting>
<para>
See <xref linkend="sql-createtype"> for more information about creating
See <xref linkend="sql-createtype"/> for more information about creating
range types.
</para>
</sect2>
@ -435,7 +435,7 @@ CREATE INDEX reservation_idx ON reservation USING GIST (during);
<literal>-|-</literal>,
<literal>&amp;&lt;</literal>, and
<literal>&amp;&gt;</literal>
(see <xref linkend="range-operators-table"> for more information).
(see <xref linkend="range-operators-table"/> for more information).
</para>
<para>
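To make the indexable operators above concrete: assuming the reservation table from the surrounding (not fully shown) example, with a tsrange column named during and the GiST index created in the hunk header, an overlap query would be:

SELECT * FROM reservation
WHERE during && tsrange('2018-07-01 14:30', '2018-07-01 15:30');

The && (overlaps) operator is one of those the GiST index can accelerate.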

View File

@ -58,7 +58,7 @@
to truncate the archive to just the minimum required to support
restarting from the current restore. <literal>%r</literal> is typically only
used by warm-standby configurations
(see <xref linkend="warm-standby">).
(see <xref linkend="warm-standby"/>).
Write <literal>%%</literal> to embed an actual <literal>%</literal> character.
</para>
@ -99,7 +99,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
may be safely removed.
This information can be used to truncate the archive to just the
minimum required to support restart from the current restore.
The <xref linkend="pgarchivecleanup"> module
The <xref linkend="pgarchivecleanup"/> module
is often used in <varname>archive_cleanup_command</varname> for
single-standby configurations, for example:
<programlisting>archive_cleanup_command = 'pg_archivecleanup /mnt/server/archivedir %r'</programlisting>
@ -107,7 +107,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
same archive directory, you will need to ensure that you do not delete
WAL files until they are no longer needed by any of the servers.
<varname>archive_cleanup_command</varname> would typically be used in a
warm-standby configuration (see <xref linkend="warm-standby">).
warm-standby configuration (see <xref linkend="warm-standby"/>).
Write <literal>%%</literal> to embed an actual <literal>%</literal> character in the
command.
</para>
@ -133,7 +133,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
<varname>recovery_end_command</varname> is to provide a mechanism for cleanup
following replication or recovery.
Any <literal>%r</literal> is replaced by the name of the file containing the
last valid restart point, like in <xref linkend="archive-cleanup-command">.
last valid restart point, like in <xref linkend="archive-cleanup-command"/>.
</para>
<para>
If the command returns a nonzero exit status then a warning log
@ -209,7 +209,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
This parameter specifies the time stamp up to which recovery
will proceed.
The precise stopping point is also influenced by
<xref linkend="recovery-target-inclusive">.
<xref linkend="recovery-target-inclusive"/>.
</para>
</listitem>
</varlistentry>
@ -229,7 +229,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
The transactions that will be recovered are those that committed
before (and optionally including) the specified one.
The precise stopping point is also influenced by
<xref linkend="recovery-target-inclusive">.
<xref linkend="recovery-target-inclusive"/>.
</para>
</listitem>
</varlistentry>
@ -244,7 +244,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
<para>
This parameter specifies the LSN of the write-ahead log location up
to which recovery will proceed. The precise stopping point is also
influenced by <xref linkend="recovery-target-inclusive">. This
influenced by <xref linkend="recovery-target-inclusive"/>. This
parameter is parsed using the system data type
<link linkend="datatype-pg-lsn"><type>pg_lsn</type></link>.
</para>
@ -270,9 +270,9 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
Specifies whether to stop just after the specified recovery target
(<literal>true</literal>), or just before the recovery target
(<literal>false</literal>).
Applies when <xref linkend="recovery-target-lsn">,
<xref linkend="recovery-target-time">, or
<xref linkend="recovery-target-xid"> is specified.
Applies when <xref linkend="recovery-target-lsn"/>,
<xref linkend="recovery-target-time"/>, or
<xref linkend="recovery-target-xid"/> is specified.
This setting controls whether transactions
having exactly the target WAL location (LSN), commit time, or transaction ID, respectively, will
be included in the recovery. Default is <literal>true</literal>.
@ -296,7 +296,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
a standby server. Other than that you only need to set this parameter
in complex re-recovery situations, where you need to return to
a state that itself was reached after a point-in-time recovery.
See <xref linkend="backup-timelines"> for discussion.
See <xref linkend="backup-timelines"/> for discussion.
</para>
</listitem>
</varlistentry>
@ -323,7 +323,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
is the most desirable point for recovery.
The paused state can be resumed by
using <function>pg_wal_replay_resume()</function> (see
<xref linkend="functions-recovery-control-table">), which then
<xref linkend="functions-recovery-control-table"/>), which then
causes recovery to end. If this recovery target is not the
desired stopping point, then shut down the server, change the
recovery target settings to a later target and restart to
@ -344,7 +344,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
</para>
<para>
This setting has no effect if no recovery target is set.
If <xref linkend="guc-hot-standby"> is not enabled, a setting of
If <xref linkend="guc-hot-standby"/> is not enabled, a setting of
<literal>pause</literal> will act the same as <literal>shutdown</literal>.
</para>
</listitem>
@ -386,9 +386,9 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
<para>
Specifies a connection string to be used for the standby server
to connect with the primary. This string is in the format
described in <xref linkend="libpq-connstring">. If any option is
described in <xref linkend="libpq-connstring"/>. If any option is
unspecified in this string, then the corresponding environment
variable (see <xref linkend="libpq-envars">) is checked. If the
variable (see <xref linkend="libpq-envars"/>) is checked. If the
environment variable is not set either, then
defaults are used.
</para>
@ -398,7 +398,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
the same as the standby server's default.
Also specify a user name corresponding to a suitably-privileged role
on the primary (see
<xref linkend="streaming-replication-authentication">).
<xref linkend="streaming-replication-authentication"/>).
A password needs to be provided too, if the primary demands password
authentication. It can be provided in the
<varname>primary_conninfo</varname> string, or in a separate
@ -423,7 +423,7 @@ restore_command = 'copy "C:\\server\\archivedir\\%f" "%p"' # Windows
Optionally specifies an existing replication slot to be used when
connecting to the primary via streaming replication to control
resource removal on the upstream node
(see <xref linkend="streaming-replication-slots">).
(see <xref linkend="streaming-replication-slots"/>).
This setting has no effect if <varname>primary_conninfo</varname> is not
set.
</para>
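Pulling several of the parameters above together, a minimal recovery.conf for a point-in-time recovery might read as follows; the archive path and timestamp are hypothetical:

restore_command = 'cp /mnt/server/archivedir/%f "%p"'
recovery_target_time = '2017-11-01 12:00:00'
recovery_target_inclusive = false
recovery_target_action = 'promote'

Recovery stops just before the given timestamp (because recovery_target_inclusive is false) and the server is then promoted.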

View File

@ -33,7 +33,7 @@ ABORT [ WORK | TRANSACTION ]
all the updates made by the transaction to be discarded.
This command is identical
in behavior to the standard <acronym>SQL</acronym> command
<xref linkend="sql-rollback">,
<xref linkend="sql-rollback"/>,
and is present only for historical reasons.
</para>
</refsect1>
@ -58,7 +58,7 @@ ABORT [ WORK | TRANSACTION ]
<title>Notes</title>
<para>
Use <xref linkend="sql-commit"> to
Use <xref linkend="sql-commit"/> to
successfully terminate a transaction.
</para>
@ -92,9 +92,9 @@ ABORT;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-begin"></member>
<member><xref linkend="sql-commit"></member>
<member><xref linkend="sql-rollback"></member>
<member><xref linkend="sql-begin"/></member>
<member><xref linkend="sql-commit"/></member>
<member><xref linkend="sql-rollback"/></member>
</simplelist>
</refsect1>
</refentry>

View File

@ -142,7 +142,7 @@ ALTER AGGREGATE <replaceable>name</replaceable> ( <replaceable>aggregate_signatu
The recommended syntax for referencing an ordered-set aggregate
is to write <literal>ORDER BY</literal> between the direct and aggregated
argument specifications, in the same style as in
<xref linkend="sql-createaggregate">. However, it will also work to
<xref linkend="sql-createaggregate"/>. However, it will also work to
omit <literal>ORDER BY</literal> and just run the direct and aggregated
argument specifications into a single list. In this abbreviated form,
if <literal>VARIADIC "any"</literal> was used in both the direct and
@ -195,8 +195,8 @@ ALTER AGGREGATE mypercentile(float8, integer) SET SCHEMA myschema;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-createaggregate"></member>
<member><xref linkend="sql-dropaggregate"></member>
<member><xref linkend="sql-createaggregate"/></member>
<member><xref linkend="sql-dropaggregate"/></member>
</simplelist>
</refsect1>
</refentry>
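For the ordered-set case described above, the two equivalent ways of writing the signature would be, reusing the aggregate from the hunk header:

ALTER AGGREGATE mypercentile(float8 ORDER BY integer) SET SCHEMA myschema;
-- abbreviated form, omitting ORDER BY:
ALTER AGGREGATE mypercentile(float8, integer) SET SCHEMA myschema;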

View File

@ -94,7 +94,7 @@ ALTER COLLATION <replaceable>name</replaceable> SET SCHEMA <replaceable>new_sche
<para>
Update the collation's version.
See <xref linkend="sql-altercollation-notes"
endterm="sql-altercollation-notes-title"> below.
endterm="sql-altercollation-notes-title"/> below.
</para>
</listitem>
</varlistentry>
@ -176,8 +176,8 @@ ALTER COLLATION "en_US" OWNER TO joe;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-createcollation"></member>
<member><xref linkend="sql-dropcollation"></member>
<member><xref linkend="sql-createcollation"/></member>
<member><xref linkend="sql-dropcollation"/></member>
</simplelist>
</refsect1>
</refentry>

View File

@ -120,8 +120,8 @@ ALTER CONVERSION iso_8859_1_to_utf8 OWNER TO joe;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-createconversion"></member>
<member><xref linkend="sql-dropconversion"></member>
<member><xref linkend="sql-createconversion"/></member>
<member><xref linkend="sql-dropconversion"/></member>
</simplelist>
</refsect1>
</refentry>

View File

@ -188,7 +188,7 @@ ALTER DATABASE <replaceable class="parameter">name</replaceable> RESET ALL
</para>
<para>
See <xref linkend="sql-set"> and <xref linkend="runtime-config">
See <xref linkend="sql-set"/> and <xref linkend="runtime-config"/>
for more information about allowed parameter names
and values.
</para>
@ -203,7 +203,7 @@ ALTER DATABASE <replaceable class="parameter">name</replaceable> RESET ALL
<para>
It is also possible to tie a session default to a specific role
rather than to a database; see
<xref linkend="sql-alterrole">.
<xref linkend="sql-alterrole"/>.
Role-specific settings override database-specific
ones if there is a conflict.
</para>
@ -234,10 +234,10 @@ ALTER DATABASE test SET enable_indexscan TO off;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-createdatabase"></member>
<member><xref linkend="sql-dropdatabase"></member>
<member><xref linkend="sql-set"></member>
<member><xref linkend="sql-createtablespace"></member>
<member><xref linkend="sql-createdatabase"/></member>
<member><xref linkend="sql-dropdatabase"/></member>
<member><xref linkend="sql-set"/></member>
<member><xref linkend="sql-createtablespace"/></member>
</simplelist>
</refsect1>
</refentry>
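A short sketch of the precedence described above, with hypothetical role names: a database-wide session default and a role-in-database setting that overrides it for one role.

ALTER DATABASE test SET work_mem = '32MB';
-- overrides the database-wide default for this role's sessions in this database:
ALTER ROLE reporting IN DATABASE test SET work_mem = '128MB';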

View File

@ -106,7 +106,7 @@ REVOKE [ GRANT OPTION FOR ]
</para>
<para>
As explained under <xref linkend="sql-grant">,
As explained under <xref linkend="sql-grant"/>,
the default privileges for any object type normally grant all grantable
permissions to the object owner, and may grant some privileges to
<literal>PUBLIC</literal> as well. However, this behavior can be changed by
@ -150,8 +150,8 @@ REVOKE [ GRANT OPTION FOR ]
This parameter, and all the other parameters in
<replaceable class="parameter">abbreviated_grant_or_revoke</replaceable>,
act as described under
<xref linkend="sql-grant"> or
<xref linkend="sql-revoke">,
<xref linkend="sql-grant"/> or
<xref linkend="sql-revoke"/>,
except that one is setting permissions for a whole class of objects
rather than specific named objects.
</para>
@ -165,11 +165,11 @@ REVOKE [ GRANT OPTION FOR ]
<title>Notes</title>
<para>
Use <xref linkend="app-psql">'s <command>\ddp</command> command
Use <xref linkend="app-psql"/>'s <command>\ddp</command> command
to obtain information about existing assignments of default privileges.
The meaning of the privilege values is the same as explained for
<command>\dp</command> under
<xref linkend="sql-grant">.
<xref linkend="sql-grant"/>.
</para>
<para>
@ -226,8 +226,8 @@ ALTER DEFAULT PRIVILEGES FOR ROLE admin REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-grant"></member>
<member><xref linkend="sql-revoke"></member>
<member><xref linkend="sql-grant"/></member>
<member><xref linkend="sql-revoke"/></member>
</simplelist>
</refsect1>
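As a concrete instance of the abbreviated GRANT form discussed above (role, schema, and grantee names are hypothetical):

ALTER DEFAULT PRIVILEGES FOR ROLE admin IN SCHEMA public
    GRANT SELECT ON TABLES TO reporting;

Tables that admin subsequently creates in schema public will be readable by reporting; \ddp in psql shows the stored default ACL.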

View File

@ -80,7 +80,7 @@ ALTER DOMAIN <replaceable class="parameter">name</replaceable>
<listitem>
<para>
This form adds a new constraint to a domain using the same syntax as
<xref linkend="sql-createdomain">.
<xref linkend="sql-createdomain"/>.
When a new constraint is added to a domain, all columns using that
domain will be checked against the newly added constraint. These
checks can be suppressed by adding the new constraint using the
@ -214,7 +214,7 @@ ALTER DOMAIN <replaceable class="parameter">name</replaceable>
<para>
Automatically drop objects that depend on the constraint,
and in turn all objects that depend on those objects
(see <xref linkend="ddl-depend">).
(see <xref linkend="ddl-depend"/>).
</para>
</listitem>
</varlistentry>
@ -342,8 +342,8 @@ ALTER DOMAIN zipcode SET SCHEMA customers;
<title>See Also</title>
<simplelist type="inline">
<member><xref linkend="sql-createdomain"></member>
<member><xref linkend="sql-dropdomain"></member>
<member><xref linkend="sql-createdomain"/></member>
<member><xref linkend="sql-dropdomain"/></member>
</simplelist>
</refsect1>

Some files were not shown because too many files have changed in this diff.