/*-------------------------------------------------------------------------
*
* pg_dump.c
* pg_dump is a utility for dumping out a postgres database
* into a script file.
*
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* pg_dump will read the system catalogs in a database and dump out a
* script that reproduces the schema in terms of SQL that is understood
* by PostgreSQL
*
* Note that pg_dump runs in a transaction-snapshot mode transaction,
* so it sees a consistent snapshot of the database including system
* catalogs. However, it relies in part on various specialized backend
* functions like pg_get_indexdef(), and those things tend to look at
* the currently committed state. So it is possible to get a 'cache
* lookup failed' error if someone performs DDL changes while a dump is
* happening. The window for this sort of thing is from the acquisition
* of the transaction snapshot to getSchemaData() (when pg_dump acquires
* AccessShareLock on every table it intends to dump). It isn't very large,
* but it can happen.
*
* http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
*
* IDENTIFICATION
* src/bin/pg_dump/pg_dump.c
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#ifdef HAVE_TERMIOS_H
#include <termios.h>
#endif
#include "access/attnum.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "catalog/pg_aggregate_d.h"
#include "catalog/pg_am_d.h"
#include "catalog/pg_attribute_d.h"
#include "catalog/pg_cast_d.h"
#include "catalog/pg_class_d.h"
#include "catalog/pg_default_acl_d.h"
#include "catalog/pg_largeobject_d.h"
#include "catalog/pg_largeobject_metadata_d.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_trigger_d.h"
#include "catalog/pg_type_d.h"
#include "dumputils.h"
#include "fe_utils/connect.h"
#include "fe_utils/string_utils.h"
#include "getopt_long.h"
#include "libpq/libpq-fs.h"
#include "parallel.h"
#include "pg_backup_db.h"
#include "pg_backup_utils.h"
#include "pg_dump.h"
#include "storage/block.h"
typedef struct
{
const char *descr; /* comment for an object */
Oid classoid; /* object class (catalog OID) */
Oid objoid; /* object OID */
int objsubid; /* subobject (table column #) */
} CommentItem;
typedef struct
{
const char *provider; /* label provider of this security label */
const char *label; /* security label for an object */
Oid classoid; /* object class (catalog OID) */
Oid objoid; /* object OID */
int objsubid; /* subobject (table column #) */
} SecLabelItem;
typedef enum OidOptions
{
zeroAsOpaque = 1,
zeroAsAny = 2,
zeroAsStar = 4,
zeroAsNone = 8
} OidOptions;
/* global decls */
static bool dosync = true; /* Issue fsync() to make dump durable on disk. */
/* subquery used to convert user ID (eg, datdba) to user name */
static const char *username_subquery;
/*
* For 8.0 and earlier servers, this is pulled from pg_database; for 8.1
* and later we use FirstNormalObjectId - 1.
*/
static Oid g_last_builtin_oid; /* value of the last builtin oid */
/* The specified names/patterns should match at least one entity */
static int strict_names = 0;
/*
* Object inclusion/exclusion lists
*
* The string lists record the patterns given by command-line switches,
* which we then convert to lists of OIDs of matching objects.
*/
static SimpleStringList schema_include_patterns = {NULL, NULL};
static SimpleOidList schema_include_oids = {NULL, NULL};
static SimpleStringList schema_exclude_patterns = {NULL, NULL};
static SimpleOidList schema_exclude_oids = {NULL, NULL};
static SimpleStringList table_include_patterns = {NULL, NULL};
static SimpleOidList table_include_oids = {NULL, NULL};
static SimpleStringList table_exclude_patterns = {NULL, NULL};
static SimpleOidList table_exclude_oids = {NULL, NULL};
static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
char g_opaque_type[10]; /* name for the opaque type */
/* placeholders for the delimiters for comments */
char g_comment_start[10];
char g_comment_end[10];
static const CatalogId nilCatalogId = {0, 0};
/* override for standard extra_float_digits setting */
static bool have_extra_float_digits = false;
static int extra_float_digits;
/*
* The default number of rows per INSERT when
* --inserts is specified without --rows-per-insert
*/
#define DUMP_DEFAULT_ROWS_PER_INSERT 1
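/*
* For illustration only (hypothetical table and values, not part of the
* original source): with --inserts and this default of one row per
* statement, the data section looks roughly like
*
* INSERT INTO public.tab VALUES (1, 'a');
* INSERT INTO public.tab VALUES (2, 'b');
*
* while --rows-per-insert=2 would batch the same rows as
*
* INSERT INTO public.tab VALUES (1, 'a'), (2, 'b');
*/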
/*
* Macro for producing quoted, schema-qualified name of a dumpable object.
*/
#define fmtQualifiedDumpable(obj) \
fmtQualifiedId((obj)->dobj.namespace->dobj.name, \
(obj)->dobj.name)
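/*
* Hedged example (not in the original source): for a table named
* "my table" in schema public, fmtQualifiedDumpable would yield
* something like public."my table", since fmtQualifiedId quotes each
* part only when quoting is actually needed.
*/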
static void help(const char *progname);
static void setup_connection(Archive *AH,
const char *dumpencoding, const char *dumpsnapshot,
char *use_role);
static ArchiveFormat parseArchiveFormat(const char *format, ArchiveMode *mode);
static void expand_schema_name_patterns(Archive *fout,
SimpleStringList *patterns,
SimpleOidList *oids,
bool strict_names);
static void expand_table_name_patterns(Archive *fout,
SimpleStringList *patterns,
SimpleOidList *oids,
bool strict_names);
static NamespaceInfo *findNamespace(Archive *fout, Oid nsoid);
static void dumpTableData(Archive *fout, TableDataInfo *tdinfo);
static void refreshMatViewData(Archive *fout, TableDataInfo *tdinfo);
static void guessConstraintInheritance(TableInfo *tblinfo, int numTables);
static void dumpComment(Archive *fout, const char *type, const char *name,
const char *namespace, const char *owner,
CatalogId catalogId, int subid, DumpId dumpId);
static int findComments(Archive *fout, Oid classoid, Oid objoid,
CommentItem **items);
static int collectComments(Archive *fout, CommentItem **items);
static void dumpSecLabel(Archive *fout, const char *type, const char *name,
const char *namespace, const char *owner,
CatalogId catalogId, int subid, DumpId dumpId);
static int findSecLabels(Archive *fout, Oid classoid, Oid objoid,
SecLabelItem **items);
static int collectSecLabels(Archive *fout, SecLabelItem **items);
static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
static void dumpExtension(Archive *fout, ExtensionInfo *extinfo);
static void dumpType(Archive *fout, TypeInfo *tyinfo);
static void dumpBaseType(Archive *fout, TypeInfo *tyinfo);
static void dumpEnumType(Archive *fout, TypeInfo *tyinfo);
static void dumpRangeType(Archive *fout, TypeInfo *tyinfo);
static void dumpUndefinedType(Archive *fout, TypeInfo *tyinfo);
static void dumpDomain(Archive *fout, TypeInfo *tyinfo);
static void dumpCompositeType(Archive *fout, TypeInfo *tyinfo);
static void dumpCompositeTypeColComments(Archive *fout, TypeInfo *tyinfo);
static void dumpShellType(Archive *fout, ShellTypeInfo *stinfo);
static void dumpProcLang(Archive *fout, ProcLangInfo *plang);
static void dumpFunc(Archive *fout, FuncInfo *finfo);
static void dumpCast(Archive *fout, CastInfo *cast);
static void dumpTransform(Archive *fout, TransformInfo *transform);
static void dumpOpr(Archive *fout, OprInfo *oprinfo);
static void dumpAccessMethod(Archive *fout, AccessMethodInfo *oprinfo);
static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo);
static void dumpOpfamily(Archive *fout, OpfamilyInfo *opfinfo);
static void dumpCollation(Archive *fout, CollInfo *collinfo);
static void dumpConversion(Archive *fout, ConvInfo *convinfo);
static void dumpRule(Archive *fout, RuleInfo *rinfo);
static void dumpAgg(Archive *fout, AggInfo *agginfo);
static void dumpTrigger(Archive *fout, TriggerInfo *tginfo);
static void dumpEventTrigger(Archive *fout, EventTriggerInfo *evtinfo);
static void dumpTable(Archive *fout, TableInfo *tbinfo);
static void dumpTableSchema(Archive *fout, TableInfo *tbinfo);
static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo);
static void dumpSequence(Archive *fout, TableInfo *tbinfo);
static void dumpSequenceData(Archive *fout, TableDataInfo *tdinfo);
static void dumpIndex(Archive *fout, IndxInfo *indxinfo);
static void dumpIndexAttach(Archive *fout, IndexAttachInfo *attachinfo);
static void dumpStatisticsExt(Archive *fout, StatsExtInfo *statsextinfo);
static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo);
static void dumpTableConstraintComment(Archive *fout, ConstraintInfo *coninfo);
static void dumpTSParser(Archive *fout, TSParserInfo *prsinfo);
static void dumpTSDictionary(Archive *fout, TSDictInfo *dictinfo);
static void dumpTSTemplate(Archive *fout, TSTemplateInfo *tmplinfo);
static void dumpTSConfig(Archive *fout, TSConfigInfo *cfginfo);
static void dumpForeignDataWrapper(Archive *fout, FdwInfo *fdwinfo);
static void dumpForeignServer(Archive *fout, ForeignServerInfo *srvinfo);
static void dumpUserMappings(Archive *fout,
const char *servername, const char *namespace,
const char *owner, CatalogId catalogId, DumpId dumpId);
static void dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo);
static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
const char *type, const char *name, const char *subname,
const char *nspname, const char *owner,
const char *acls, const char *racls,
const char *initacls, const char *initracls);
static void getDependencies(Archive *fout);
static void BuildArchiveDependencies(Archive *fout);
static void findDumpableDependencies(ArchiveHandle *AH, DumpableObject *dobj,
DumpId **dependencies, int *nDeps, int *allocDeps);
static DumpableObject *createBoundaryObjects(void);
static void addBoundaryDependencies(DumpableObject **dobjs, int numObjs,
DumpableObject *boundaryObjs);
static void getDomainConstraints(Archive *fout, TypeInfo *tyinfo);
static void getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind);
static void makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo);
static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
static char *format_function_arguments(FuncInfo *finfo, char *funcargs,
bool is_agg);
static char *format_function_arguments_old(Archive *fout,
FuncInfo *finfo, int nallargs,
char **allargtypes,
char **argmodes,
char **argnames);
static char *format_function_signature(Archive *fout,
FuncInfo *finfo, bool honor_quotes);
static char *convertRegProcReference(Archive *fout,
const char *proc);
static char *getFormattedOperatorName(Archive *fout, const char *oproid);
static char *convertTSFunction(Archive *fout, Oid funcOid);
static Oid findLastBuiltinOid_V71(Archive *fout);
static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
static void getBlobs(Archive *fout);
static void dumpBlob(Archive *fout, BlobInfo *binfo);
static int dumpBlobs(Archive *fout, void *arg);
static void dumpPolicy(Archive *fout, PolicyInfo *polinfo);
static void dumpPublication(Archive *fout, PublicationInfo *pubinfo);
static void dumpPublicationTable(Archive *fout, PublicationRelInfo *pubrinfo);
static void dumpSubscription(Archive *fout, SubscriptionInfo *subinfo);
static void dumpDatabase(Archive *AH);
static void dumpDatabaseConfig(Archive *AH, PQExpBuffer outbuf,
const char *dbname, Oid dboid);
static void dumpEncoding(Archive *AH);
static void dumpStdStrings(Archive *AH);
static void dumpSearchPath(Archive *AH);
static void binary_upgrade_set_type_oids_by_type_oid(Archive *fout,
PQExpBuffer upgrade_buffer,
Oid pg_type_oid,
bool force_array_type);
static bool binary_upgrade_set_type_oids_by_rel_oid(Archive *fout,
PQExpBuffer upgrade_buffer, Oid pg_rel_oid);
static void binary_upgrade_set_pg_class_oids(Archive *fout,
PQExpBuffer upgrade_buffer,
Oid pg_class_oid, bool is_index);
static void binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
DumpableObject *dobj,
const char *objtype,
const char *objname,
const char *objnamespace);
static const char *getAttrName(int attrnum, TableInfo *tblInfo);
static const char *fmtCopyColumnList(const TableInfo *ti, PQExpBuffer buffer);
static bool nonemptyReloptions(const char *reloptions);
static void appendReloptionsArrayAH(PQExpBuffer buffer, const char *reloptions,
const char *prefix, Archive *fout);
static char *get_synchronized_snapshot(Archive *fout);
static void setupDumpWorker(Archive *AHX);
static TableInfo *getRootTableInfo(TableInfo *tbinfo);
int
main(int argc, char **argv)
{
int c;
const char *filename = NULL;
const char *format = "p";
TableInfo *tblinfo;
int numTables;
DumpableObject **dobjs;
int numObjs;
DumpableObject *boundaryObjs;
int i;
int optindex;
char *endptr;
RestoreOptions *ropt;
Archive *fout; /* the script file */
bool g_verbose = false;
const char *dumpencoding = NULL;
const char *dumpsnapshot = NULL;
char *use_role = NULL;
long rowsPerInsert;
int numWorkers = 1;
trivalue prompt_password = TRI_DEFAULT;
int compressLevel = -1;
int plainText = 0;
ArchiveFormat archiveFormat = archUnknown;
ArchiveMode archiveMode;
static DumpOptions dopt;
static struct option long_options[] = {
{"data-only", no_argument, NULL, 'a'},
{"blobs", no_argument, NULL, 'b'},
{"no-blobs", no_argument, NULL, 'B'},
{"clean", no_argument, NULL, 'c'},
{"create", no_argument, NULL, 'C'},
{"dbname", required_argument, NULL, 'd'},
{"file", required_argument, NULL, 'f'},
{"format", required_argument, NULL, 'F'},
{"host", required_argument, NULL, 'h'},
{"jobs", 1, NULL, 'j'},
{"no-reconnect", no_argument, NULL, 'R'},
{"no-owner", no_argument, NULL, 'O'},
{"port", required_argument, NULL, 'p'},
{"schema", required_argument, NULL, 'n'},
{"exclude-schema", required_argument, NULL, 'N'},
{"schema-only", no_argument, NULL, 's'},
{"superuser", required_argument, NULL, 'S'},
{"table", required_argument, NULL, 't'},
{"exclude-table", required_argument, NULL, 'T'},
{"no-password", no_argument, NULL, 'w'},
{"password", no_argument, NULL, 'W'},
{"username", required_argument, NULL, 'U'},
{"verbose", no_argument, NULL, 'v'},
{"no-privileges", no_argument, NULL, 'x'},
{"no-acl", no_argument, NULL, 'x'},
{"compress", required_argument, NULL, 'Z'},
{"encoding", required_argument, NULL, 'E'},
{"help", no_argument, NULL, '?'},
{"version", no_argument, NULL, 'V'},
/*
* the following options don't have an equivalent short option letter
*/
{"attribute-inserts", no_argument, &dopt.column_inserts, 1},
{"binary-upgrade", no_argument, &dopt.binary_upgrade, 1},
{"column-inserts", no_argument, &dopt.column_inserts, 1},
{"disable-dollar-quoting", no_argument, &dopt.disable_dollar_quoting, 1},
{"disable-triggers", no_argument, &dopt.disable_triggers, 1},
{"enable-row-security", no_argument, &dopt.enable_row_security, 1},
{"exclude-table-data", required_argument, NULL, 4},
{"extra-float-digits", required_argument, NULL, 8},
{"if-exists", no_argument, &dopt.if_exists, 1},
{"inserts", no_argument, NULL, 9},
{"lock-wait-timeout", required_argument, NULL, 2},
{"no-tablespaces", no_argument, &dopt.outputNoTablespaces, 1},
{"quote-all-identifiers", no_argument, &quote_all_identifiers, 1},
{"load-via-partition-root", no_argument, &dopt.load_via_partition_root, 1},
{"role", required_argument, NULL, 3},
{"section", required_argument, NULL, 5},
{"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
{"snapshot", required_argument, NULL, 6},
{"strict-names", no_argument, &strict_names, 1},
{"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
{"no-comments", no_argument, &dopt.no_comments, 1},
{"no-publications", no_argument, &dopt.no_publications, 1},
{"no-security-labels", no_argument, &dopt.no_security_labels, 1},
{"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
{"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
{"no-subscriptions", no_argument, &dopt.no_subscriptions, 1},
{"no-sync", no_argument, NULL, 7},
{"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
{"rows-per-insert", required_argument, NULL, 10},
{NULL, 0, NULL, 0}
};
pg_logging_init(argv[0]);
pg_logging_set_level(PG_LOG_WARNING);
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("pg_dump"));
/*
* Initialize what we need for parallel execution, especially for thread
* support on Windows.
*/
init_parallel_dump_utils();
strcpy(g_comment_start, "-- ");
g_comment_end[0] = '\0';
strcpy(g_opaque_type, "opaque");
progname = get_progname(argv[0]);
if (argc > 1)
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
help(progname);
exit_nicely(0);
}
if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
{
puts("pg_dump (PostgreSQL) " PG_VERSION);
exit_nicely(0);
}
}
InitDumpOptions(&dopt);
while ((c = getopt_long(argc, argv, "abBcCd:E:f:F:h:j:n:N:Op:RsS:t:T:U:vwWxZ:",
long_options, &optindex)) != -1)
{
switch (c)
{
case 'a': /* Dump data only */
dopt.dataOnly = true;
break;
case 'b': /* Dump blobs */
dopt.outputBlobs = true;
break;
case 'B': /* Don't dump blobs */
dopt.dontOutputBlobs = true;
break;
case 'c': /* clean (i.e., drop) schema prior to create */
dopt.outputClean = 1;
break;
case 'C': /* Create DB */
dopt.outputCreateDB = 1;
break;
case 'd': /* database name */
dopt.dbname = pg_strdup(optarg);
break;
case 'E': /* Dump encoding */
dumpencoding = pg_strdup(optarg);
break;
case 'f':
filename = pg_strdup(optarg);
break;
case 'F':
format = pg_strdup(optarg);
break;
case 'h': /* server host */
dopt.pghost = pg_strdup(optarg);
break;
case 'j': /* number of dump jobs */
numWorkers = atoi(optarg);
break;
case 'n': /* include schema(s) */
simple_string_list_append(&schema_include_patterns, optarg);
dopt.include_everything = false;
break;
case 'N': /* exclude schema(s) */
simple_string_list_append(&schema_exclude_patterns, optarg);
break;
case 'O': /* Don't reconnect to match owner */
dopt.outputNoOwner = 1;
break;
case 'p': /* server port */
dopt.pgport = pg_strdup(optarg);
break;
case 'R':
/* no-op, still accepted for backwards compatibility */
break;
case 's': /* dump schema only */
dopt.schemaOnly = true;
break;
case 'S': /* Username for superuser in plain text output */
dopt.outputSuperuser = pg_strdup(optarg);
break;
case 't': /* include table(s) */
simple_string_list_append(&table_include_patterns, optarg);
dopt.include_everything = false;
break;
case 'T': /* exclude table(s) */
simple_string_list_append(&table_exclude_patterns, optarg);
break;
case 'U':
dopt.username = pg_strdup(optarg);
break;
case 'v': /* verbose */
g_verbose = true;
pg_logging_set_level(PG_LOG_INFO);
break;
case 'w':
prompt_password = TRI_NO;
break;
case 'W':
prompt_password = TRI_YES;
break;
case 'x': /* skip ACL dump */
dopt.aclsSkip = true;
break;
case 'Z': /* Compression Level */
compressLevel = atoi(optarg);
if (compressLevel < 0 || compressLevel > 9)
{
pg_log_error("compression level must be in range 0..9");
exit_nicely(1);
}
break;
case 0:
/* This covers the long options. */
break;
case 2: /* lock-wait-timeout */
dopt.lockWaitTimeout = pg_strdup(optarg);
break;
case 3: /* SET ROLE */
use_role = pg_strdup(optarg);
break;
case 4: /* exclude table(s) data */
simple_string_list_append(&tabledata_exclude_patterns, optarg);
break;
case 5: /* section */
set_dump_section(optarg, &dopt.dumpSections);
break;
case 6: /* snapshot */
dumpsnapshot = pg_strdup(optarg);
break;
case 7: /* no-sync */
dosync = false;
break;
case 8:
have_extra_float_digits = true;
extra_float_digits = atoi(optarg);
if (extra_float_digits < -15 || extra_float_digits > 3)
{
pg_log_error("extra_float_digits must be in range -15..3");
exit_nicely(1);
}
break;
case 9: /* inserts */
/*
* dump_inserts also stores the --rows-per-insert value, so be
* careful not to overwrite that.
*/
if (dopt.dump_inserts == 0)
dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
break;
case 10: /* rows per insert */
errno = 0;
rowsPerInsert = strtol(optarg, &endptr, 10);
if (endptr == optarg || *endptr != '\0' ||
rowsPerInsert <= 0 || rowsPerInsert > INT_MAX ||
errno == ERANGE)
{
pg_log_error("rows-per-insert must be in range %d..%d",
1, INT_MAX);
exit_nicely(1);
}
dopt.dump_inserts = (int) rowsPerInsert;
break;
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
exit_nicely(1);
}
}
/*
* Non-option argument specifies database name as long as it wasn't
* already specified with -d / --dbname
*/
if (optind < argc && dopt.dbname == NULL)
dopt.dbname = argv[optind++];
/* Complain if any arguments remain */
if (optind < argc)
{
pg_log_error("too many command-line arguments (first is \"%s\")",
argv[optind]);
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
exit_nicely(1);
}
/* --column-inserts implies --inserts */
if (dopt.column_inserts && dopt.dump_inserts == 0)
dopt.dump_inserts = DUMP_DEFAULT_ROWS_PER_INSERT;
/*
* Binary upgrade mode implies dumping sequence data even in schema-only
* mode. This is not exposed as a separate option, but kept separate
* internally for clarity.
*/
if (dopt.binary_upgrade)
dopt.sequence_data = 1;
if (dopt.dataOnly && dopt.schemaOnly)
{
pg_log_error("options -s/--schema-only and -a/--data-only cannot be used together");
exit_nicely(1);
}
if (dopt.dataOnly && dopt.outputClean)
{
pg_log_error("options -c/--clean and -a/--data-only cannot be used together");
exit_nicely(1);
}
if (dopt.if_exists && !dopt.outputClean)
fatal("option --if-exists requires option -c/--clean");
/*
* --inserts is already implied above if --column-inserts or
* --rows-per-insert was specified.
*/
if (dopt.do_nothing && dopt.dump_inserts == 0)
fatal("option --on-conflict-do-nothing requires option --inserts, --rows-per-insert, or --column-inserts");
/* Identify archive format to emit */
archiveFormat = parseArchiveFormat(format, &archiveMode);
/* archiveFormat specific setup */
if (archiveFormat == archNull)
plainText = 1;
/* Custom and directory formats are compressed by default, others not */
if (compressLevel == -1)
{
#ifdef HAVE_LIBZ
if (archiveFormat == archCustom || archiveFormat == archDirectory)
compressLevel = Z_DEFAULT_COMPRESSION;
else
#endif
compressLevel = 0;
}
#ifndef HAVE_LIBZ
if (compressLevel != 0)
pg_log_warning("requested compression not available in this installation -- archive will be uncompressed");
compressLevel = 0;
#endif
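/*
* Hedged usage sketch (hypothetical invocations, not part of the
* original source) showing the effect of these defaults:
*
* pg_dump -Fc mydb > db.dump (compressed, zlib default level)
* pg_dump -Fc -Z0 mydb > db.dump (explicitly uncompressed)
* pg_dump -Fp mydb > db.sql (plain text; default is no compression)
*/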
/*
* If emitting an archive format, we always want to emit a DATABASE item,
* in case --create is specified at pg_restore time.
*/
if (!plainText)
dopt.outputCreateDB = 1;
/*
* On Windows we can only have at most MAXIMUM_WAIT_OBJECTS (= 64 usually)
* parallel jobs because that's the maximum limit for the
* WaitForMultipleObjects() call.
*/
if (numWorkers <= 0
#ifdef WIN32
|| numWorkers > MAXIMUM_WAIT_OBJECTS
#endif
)
fatal("invalid number of parallel jobs");
/* Parallel backup only in the directory archive format so far */
if (archiveFormat != archDirectory && numWorkers > 1)
fatal("parallel backup only supported by the directory format");
/* Open the output file */
fout = CreateArchive(filename, archiveFormat, compressLevel, dosync,
archiveMode, setupDumpWorker);
/* Make dump options accessible right away */
SetArchiveOptions(fout, &dopt, NULL);
/* Register the cleanup hook */
on_exit_close_archive(fout);
/* Let the archiver know how noisy to be */
fout->verbose = g_verbose;
/*
* We allow the server to be back to 8.0, and up to any minor release of
* our own major version. (See also version check in pg_dumpall.c.)
*/
fout->minRemoteVersion = 80000;
fout->maxRemoteVersion = (PG_VERSION_NUM / 100) * 100 + 99;
fout->numWorkers = numWorkers;
/*
* Open the database using the Archiver, so it knows about it. Errors mean
* death.
*/
ConnectDatabase(fout, dopt.dbname, dopt.pghost, dopt.pgport, dopt.username, prompt_password);
setup_connection(fout, dumpencoding, dumpsnapshot, use_role);
/*
* Disable security label support if server version < v9.1.x (prevents
* access to nonexistent pg_seclabel catalog)
*/
if (fout->remoteVersion < 90100)
dopt.no_security_labels = 1;
/*
* On hot standbys, never try to dump unlogged table data, since it will
* just throw an error.
*/
if (fout->isStandby)
dopt.no_unlogged_table_data = true;
/* Select the appropriate subquery to convert user IDs to names */
if (fout->remoteVersion >= 80100)
username_subquery = "SELECT rolname FROM pg_catalog.pg_roles WHERE oid =";
else
username_subquery = "SELECT usename FROM pg_catalog.pg_user WHERE usesysid =";
/* check the version for the synchronized snapshots feature */
if (numWorkers > 1 && fout->remoteVersion < 90200
&& !dopt.no_synchronized_snapshots)
fatal("Synchronized snapshots are not supported by this server version.\n"
"Run with --no-synchronized-snapshots instead if you do not need\n"
"synchronized snapshots.");
/* check the version when a snapshot is explicitly specified by user */
if (dumpsnapshot && fout->remoteVersion < 90200)
fatal("Exported snapshots are not supported by this server version.");
/*
* Find the last built-in OID, if needed (prior to 8.1)
*
* With 8.1 and above, we can just use FirstNormalObjectId - 1.
*/
if (fout->remoteVersion < 80100)
g_last_builtin_oid = findLastBuiltinOid_V71(fout);
else
g_last_builtin_oid = FirstNormalObjectId - 1;
pg_log_info("last built-in OID is %u", g_last_builtin_oid);
/* Expand schema selection patterns into OID lists */
if (schema_include_patterns.head != NULL)
{
expand_schema_name_patterns(fout, &schema_include_patterns,
&schema_include_oids,
strict_names);
if (schema_include_oids.head == NULL)
fatal("no matching schemas were found");
}
expand_schema_name_patterns(fout, &schema_exclude_patterns,
&schema_exclude_oids,
false);
/* non-matching exclusion patterns aren't an error */
/* Expand table selection patterns into OID lists */
if (table_include_patterns.head != NULL)
{
expand_table_name_patterns(fout, &table_include_patterns,
&table_include_oids,
strict_names);
if (table_include_oids.head == NULL)
fatal("no matching tables were found");
}
expand_table_name_patterns(fout, &table_exclude_patterns,
&table_exclude_oids,
false);
expand_table_name_patterns(fout, &tabledata_exclude_patterns,
&tabledata_exclude_oids,
false);
/* non-matching exclusion patterns aren't an error */
/*
* Dumping blobs is the default for dumps where an inclusion switch is not
* used (an "include everything" dump). -B can be used to exclude blobs
* from those dumps. -b can be used to include blobs even when an
* inclusion switch is used.
*
* -s means "schema only" and blobs are data, not schema, so we never
* include blobs when -s is used.
*/
if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs)
dopt.outputBlobs = true;
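/*
* For illustration (hypothetical invocations, not from the original
* source), the rules above work out to:
*
* pg_dump mydb -- blobs included ("include everything" dump)
* pg_dump -B mydb -- blobs excluded
* pg_dump -t tab mydb -- blobs excluded (inclusion switch used)
* pg_dump -b -t tab mydb -- blobs included again via -b
* pg_dump -s mydb -- blobs excluded (they are data, not schema)
*/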
/*
* Now scan the database and create DumpableObject structs for all the
* objects we intend to dump.
*/
tblinfo = getSchemaData(fout, &numTables);
if (fout->remoteVersion < 80400)
guessConstraintInheritance(tblinfo, numTables);
if (!dopt.schemaOnly)
{
getTableData(&dopt, tblinfo, numTables, 0);
buildMatViewRefreshDependencies(fout);
if (dopt.dataOnly)
getTableDataFKConstraints();
}
if (dopt.schemaOnly && dopt.sequence_data)
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
/*
* In binary-upgrade mode, we do not have to worry about the actual blob
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
*
* However, we do need to collect blob information as there may be
* comments or other information on blobs that we do need to dump out.
*/
if (dopt.outputBlobs || dopt.binary_upgrade)
getBlobs(fout);
/*
* Collect dependency data to assist in ordering the objects.
*/
getDependencies(fout);
/* Lastly, create dummy objects to represent the section boundaries */
boundaryObjs = createBoundaryObjects();
/* Get pointers to all the known DumpableObjects */
getDumpableObjects(&dobjs, &numObjs);
/*
* Add dummy dependencies to enforce the dump section ordering.
*/
addBoundaryDependencies(dobjs, numObjs, boundaryObjs);
/*
* Sort the objects into a safe dump order (no forward references).
*
* We rely on dependency information to help us determine a safe order, so
* the initial sort is mostly for cosmetic purposes: we sort by name to
* ensure that logically identical schemas will dump identically.
*/
sortDumpableObjectsByTypeName(dobjs, numObjs);
sortDumpableObjects(dobjs, numObjs,
boundaryObjs[0].dumpId, boundaryObjs[1].dumpId);
/*
* Create archive TOC entries for all the objects to be dumped, in a safe
* order.
*/
/* First the special ENCODING, STDSTRINGS, and SEARCHPATH entries. */
dumpEncoding(fout);
dumpStdStrings(fout);
dumpSearchPath(fout);
/* The database items are always next, unless we don't want them at all */
if (dopt.outputCreateDB)
dumpDatabase(fout);
/* Now the rearrangeable objects. */
for (i = 0; i < numObjs; i++)
dumpDumpableObject(fout, dobjs[i]);
/*
* Set up options info to ensure we dump what we want.
*/
ropt = NewRestoreOptions();
ropt->filename = filename;
/* if you change this list, see dumpOptionsFromRestoreOptions */
ropt->dropSchema = dopt.outputClean;
ropt->dataOnly = dopt.dataOnly;
ropt->schemaOnly = dopt.schemaOnly;
ropt->if_exists = dopt.if_exists;
ropt->column_inserts = dopt.column_inserts;
ropt->dumpSections = dopt.dumpSections;
ropt->aclsSkip = dopt.aclsSkip;
ropt->superuser = dopt.outputSuperuser;
ropt->createDB = dopt.outputCreateDB;
ropt->noOwner = dopt.outputNoOwner;
ropt->noTablespace = dopt.outputNoTablespaces;
ropt->disable_triggers = dopt.disable_triggers;
ropt->use_setsessauth = dopt.use_setsessauth;
ropt->disable_dollar_quoting = dopt.disable_dollar_quoting;
ropt->dump_inserts = dopt.dump_inserts;
ropt->no_comments = dopt.no_comments;
ropt->no_publications = dopt.no_publications;
ropt->no_security_labels = dopt.no_security_labels;
ropt->no_subscriptions = dopt.no_subscriptions;
ropt->lockWaitTimeout = dopt.lockWaitTimeout;
ropt->include_everything = dopt.include_everything;
ropt->enable_row_security = dopt.enable_row_security;
ropt->sequence_data = dopt.sequence_data;
ropt->binary_upgrade = dopt.binary_upgrade;
if (compressLevel == -1)
ropt->compression = 0;
else
ropt->compression = compressLevel;
ropt->suppressDumpWarnings = true; /* We've already shown them */
SetArchiveOptions(fout, &dopt, ropt);
/* Mark which entries should be output */
ProcessArchiveRestoreOptions(fout);
/*
* The archive's TOC entries are now marked as to which ones will actually
* be output, so we can set up their dependency lists properly. This isn't
* necessary for plain-text output, though.
*/
if (!plainText)
BuildArchiveDependencies(fout);
/*
* And finally we can do the actual output.
*
* Note: for non-plain-text output formats, the output file is written
* inside CloseArchive(). This is, um, bizarre; but not worth changing
* right now.
*/
if (plainText)
RestoreArchive(fout);
CloseArchive(fout);
exit_nicely(0);
}
static void
help(const char *progname)
{
printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
printf(_("\nGeneral options:\n"));
printf(_(" -f, --file=FILENAME output file or directory name\n"));
printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
" plain text (default))\n"));
printf(_(" -j, --jobs=NUM use this many parallel jobs to dump\n"));
printf(_(" -v, --verbose verbose mode\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" -Z, --compress=0-9 compression level for compressed formats\n"));
printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
printf(_(" --no-sync do not wait for changes to be written safely to disk\n"));
printf(_(" -?, --help show this help, then exit\n"));
printf(_("\nOptions controlling the output content:\n"));
printf(_(" -a, --data-only dump only the data, not the schema\n"));
printf(_(" -b, --blobs include large objects in dump\n"));
printf(_(" -B, --no-blobs exclude large objects in dump\n"));
printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
printf(_(" -C, --create include commands to create database in dump\n"));
printf(_(" -E, --encoding=ENCODING dump the data in encoding ENCODING\n"));
printf(_(" -n, --schema=PATTERN dump the specified schema(s) only\n"));
printf(_(" -N, --exclude-schema=PATTERN do NOT dump the specified schema(s)\n"));
printf(_(" -O, --no-owner skip restoration of object ownership in\n"
" plain-text format\n"));
printf(_(" -s, --schema-only dump only the schema, no data\n"));
printf(_(" -S, --superuser=NAME superuser user name to use in plain-text format\n"));
printf(_(" -t, --table=PATTERN dump the specified table(s) only\n"));
printf(_(" -T, --exclude-table=PATTERN do NOT dump the specified table(s)\n"));
printf(_(" -x, --no-privileges do not dump privileges (grant/revoke)\n"));
printf(_(" --binary-upgrade for use by upgrade utilities only\n"));
printf(_(" --column-inserts dump data as INSERT commands with column names\n"));
printf(_(" --disable-dollar-quoting disable dollar quoting, use SQL standard quoting\n"));
printf(_(" --disable-triggers disable triggers during data-only restore\n"));
printf(_(" --enable-row-security enable row security (dump only content user has\n"
" access to)\n"));
printf(_(" --exclude-table-data=PATTERN do NOT dump data for the specified table(s)\n"));
printf(_(" --extra-float-digits=NUM override default setting for extra_float_digits\n"));
printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
printf(_(" --inserts dump data as INSERT commands, rather than COPY\n"));
printf(_(" --load-via-partition-root load partitions via the root table\n"));
printf(_(" --no-comments do not dump comments\n"));
printf(_(" --no-publications do not dump publications\n"));
printf(_(" --no-security-labels do not dump security label assignments\n"));
printf(_(" --no-subscriptions do not dump subscriptions\n"));
printf(_(" --no-synchronized-snapshots do not use synchronized snapshots in parallel jobs\n"));
printf(_(" --no-tablespaces do not dump tablespace assignments\n"));
printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
printf(_(" --strict-names require table and/or schema include patterns to\n"
" match at least one entity each\n"));
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
printf(_("\nConnection options:\n"));
printf(_(" -d, --dbname=DBNAME database to dump\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
printf(_(" -p, --port=PORT database server port number\n"));
printf(_(" -U, --username=NAME connect as specified database user\n"));
printf(_(" -w, --no-password never prompt for password\n"));
printf(_(" -W, --password force password prompt (should happen automatically)\n"));
printf(_(" --role=ROLENAME do SET ROLE before dump\n"));
printf(_("\nIf no database name is supplied, then the PGDATABASE environment\n"
"variable value is used.\n\n"));
printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
}
static void
setup_connection(Archive *AH, const char *dumpencoding,
const char *dumpsnapshot, char *use_role)
{
DumpOptions *dopt = AH->dopt;
PGconn *conn = GetConnection(AH);
const char *std_strings;
PQclear(ExecuteSqlQueryForSingleRow(AH, ALWAYS_SECURE_SEARCH_PATH_SQL));
/*
* Set the client encoding if requested.
*/
if (dumpencoding)
{
if (PQsetClientEncoding(conn, dumpencoding) < 0)
fatal("invalid client encoding \"%s\" specified",
dumpencoding);
}
/*
* Get the active encoding and the standard_conforming_strings setting, so
* we know how to escape strings.
*/
AH->encoding = PQclientEncoding(conn);
std_strings = PQparameterStatus(conn, "standard_conforming_strings");
AH->std_strings = (std_strings && strcmp(std_strings, "on") == 0);
/*
* Set the role if requested. In a parallel dump worker, we'll be passed
* use_role == NULL, but AH->use_role is already set (if user specified it
* originally) and we should use that.
*/
if (!use_role && AH->use_role)
use_role = AH->use_role;
/* Set the role if requested */
if (use_role && AH->remoteVersion >= 80100)
{
PQExpBuffer query = createPQExpBuffer();
appendPQExpBuffer(query, "SET ROLE %s", fmtId(use_role));
ExecuteSqlStatement(AH, query->data);
destroyPQExpBuffer(query);
/* save it for possible later use by parallel workers */
if (!AH->use_role)
AH->use_role = pg_strdup(use_role);
}
/* Set the datestyle to ISO to ensure the dump's portability */
ExecuteSqlStatement(AH, "SET DATESTYLE = ISO");
/* Likewise, avoid using sql_standard intervalstyle */
if (AH->remoteVersion >= 80400)
ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
/*
* Use an explicitly specified extra_float_digits if it has been provided.
* Otherwise, set extra_float_digits so that we can dump float data
* exactly (given correctly implemented float I/O code, anyway).
*/
if (have_extra_float_digits)
{
PQExpBuffer q = createPQExpBuffer();
appendPQExpBuffer(q, "SET extra_float_digits TO %d",
extra_float_digits);
ExecuteSqlStatement(AH, q->data);
destroyPQExpBuffer(q);
}
else if (AH->remoteVersion >= 90000)
ExecuteSqlStatement(AH, "SET extra_float_digits TO 3");
else
ExecuteSqlStatement(AH, "SET extra_float_digits TO 2");
/*
* If synchronized scanning is supported, disable it, to prevent
* unpredictable changes in row ordering across a dump and reload.
*/
if (AH->remoteVersion >= 80300)
ExecuteSqlStatement(AH, "SET synchronize_seqscans TO off");
/*
* Disable timeouts if supported.
*/
ExecuteSqlStatement(AH, "SET statement_timeout = 0");
if (AH->remoteVersion >= 90300)
ExecuteSqlStatement(AH, "SET lock_timeout = 0");
if (AH->remoteVersion >= 90600)
ExecuteSqlStatement(AH, "SET idle_in_transaction_session_timeout = 0");
/*
* Quote all identifiers, if requested.
*/
if (quote_all_identifiers && AH->remoteVersion >= 90100)
ExecuteSqlStatement(AH, "SET quote_all_identifiers = true");
/*
* Adjust row-security mode, if supported.
*/
if (AH->remoteVersion >= 90500)
{
if (dopt->enable_row_security)
ExecuteSqlStatement(AH, "SET row_security = on");
else
ExecuteSqlStatement(AH, "SET row_security = off");
}
/*
* Start transaction-snapshot mode transaction to dump consistent data.
*/
ExecuteSqlStatement(AH, "BEGIN");
if (AH->remoteVersion >= 90100)
{
/*
* To support the combination of serializable_deferrable with the jobs
* option we use REPEATABLE READ for the worker connections that are
* passed a snapshot. As long as the snapshot is acquired in a
* SERIALIZABLE, READ ONLY, DEFERRABLE transaction, its use within a
* REPEATABLE READ transaction provides the appropriate integrity
* guarantees. This is a kluge, but safe for back-patching.
*/
if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
ExecuteSqlStatement(AH,
"SET TRANSACTION ISOLATION LEVEL "
"SERIALIZABLE, READ ONLY, DEFERRABLE");
else
ExecuteSqlStatement(AH,
"SET TRANSACTION ISOLATION LEVEL "
"REPEATABLE READ, READ ONLY");
}
else
{
ExecuteSqlStatement(AH,
"SET TRANSACTION ISOLATION LEVEL "
"SERIALIZABLE, READ ONLY");
}
/*
* If user specified a snapshot to use, select that. In a parallel dump
* worker, we'll be passed dumpsnapshot == NULL, but AH->sync_snapshot_id
* is already set (if the server can handle it) and we should use that.
*/
if (dumpsnapshot)
AH->sync_snapshot_id = pg_strdup(dumpsnapshot);
if (AH->sync_snapshot_id)
{
PQExpBuffer query = createPQExpBuffer();
appendPQExpBufferStr(query, "SET TRANSACTION SNAPSHOT ");
appendStringLiteralConn(query, AH->sync_snapshot_id, conn);
ExecuteSqlStatement(AH, query->data);
destroyPQExpBuffer(query);
}
else if (AH->numWorkers > 1 &&
AH->remoteVersion >= 90200 &&
!dopt->no_synchronized_snapshots)
{
if (AH->isStandby && AH->remoteVersion < 100000)
fatal("Synchronized snapshots on standby servers are not supported by this server version.\n"
"Run with --no-synchronized-snapshots instead if you do not need\n"
"synchronized snapshots.");
AH->sync_snapshot_id = get_synchronized_snapshot(AH);
}
}
/* Set up connection for a parallel worker process */
static void
setupDumpWorker(Archive *AH)
{
/*
* We want to re-select all the same values the master connection is
* using. We'll have inherited directly-usable values in
* AH->sync_snapshot_id and AH->use_role, but we need to translate the
* inherited encoding value back to a string to pass to setup_connection.
*/
setup_connection(AH,
pg_encoding_to_char(AH->encoding),
NULL,
NULL);
}
static char *
get_synchronized_snapshot(Archive *fout)
{
char *query = "SELECT pg_catalog.pg_export_snapshot()";
char *result;
PGresult *res;
res = ExecuteSqlQueryForSingleRow(fout, query);
result = pg_strdup(PQgetvalue(res, 0, 0));
PQclear(res);
return result;
}
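/*
* Sketch of the synchronized-snapshot handshake (an illustration, not
* part of the original source). The leader exports its snapshot and
* each worker imports it before dumping:
*
* -- leader connection
* BEGIN;
* SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
* SELECT pg_catalog.pg_export_snapshot(); -- e.g. '00000003-0000001B-1'
*
* -- each worker connection
* BEGIN;
* SET TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ ONLY;
* SET TRANSACTION SNAPSHOT '00000003-0000001B-1';
*
* The snapshot identifier shown is a made-up example value.
*/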
static ArchiveFormat
parseArchiveFormat(const char *format, ArchiveMode *mode)
{
ArchiveFormat archiveFormat;
*mode = archModeWrite;
if (pg_strcasecmp(format, "a") == 0 || pg_strcasecmp(format, "append") == 0)
{
/* This is used by pg_dumpall, and is not documented */
archiveFormat = archNull;
*mode = archModeAppend;
}
else if (pg_strcasecmp(format, "c") == 0)
archiveFormat = archCustom;
else if (pg_strcasecmp(format, "custom") == 0)
archiveFormat = archCustom;
else if (pg_strcasecmp(format, "d") == 0)
archiveFormat = archDirectory;
else if (pg_strcasecmp(format, "directory") == 0)
archiveFormat = archDirectory;
else if (pg_strcasecmp(format, "p") == 0)
archiveFormat = archNull;
else if (pg_strcasecmp(format, "plain") == 0)
archiveFormat = archNull;
else if (pg_strcasecmp(format, "t") == 0)
archiveFormat = archTar;
else if (pg_strcasecmp(format, "tar") == 0)
archiveFormat = archTar;
else
fatal("invalid output format \"%s\" specified", format);
return archiveFormat;
}
/*
* Find the OIDs of all schemas matching the given list of patterns,
* and append them to the given OID list.
*/
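/*
* Illustrative note (assumption, not from the original source): for a
* pattern such as foo*, processSQLNamePattern appends a clause roughly
* like
*
* WHERE n.nspname ~ '^(foo.*)$'
*
* i.e. shell-style wildcards are translated into an anchored regular
* expression, the same convention psql uses for \d patterns.
*/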
static void
expand_schema_name_patterns(Archive *fout,
SimpleStringList *patterns,
SimpleOidList *oids,
bool strict_names)
{
PQExpBuffer query;
PGresult *res;
SimpleStringListCell *cell;
int i;
if (patterns->head == NULL)
return; /* nothing to do */
query = createPQExpBuffer();
/*
* The loop below runs multiple SELECTs, which might sometimes result in
* duplicate entries in the OID list, but we don't care.
*/
for (cell = patterns->head; cell; cell = cell->next)
{
appendPQExpBufferStr(query,
"SELECT oid FROM pg_catalog.pg_namespace n\n");
processSQLNamePattern(GetConnection(fout), query, cell->val, false,
false, NULL, "n.nspname", NULL, NULL);
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
if (strict_names && PQntuples(res) == 0)
fatal("no matching schemas were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
}
PQclear(res);
resetPQExpBuffer(query);
}
destroyPQExpBuffer(query);
}
/*
* Find the OIDs of all tables matching the given list of patterns,
* and append them to the given OID list. See also expand_dbname_patterns()
* in pg_dumpall.c
*/
static void
expand_table_name_patterns(Archive *fout,
SimpleStringList *patterns, SimpleOidList *oids,
bool strict_names)
{
PQExpBuffer query;
PGresult *res;
SimpleStringListCell *cell;
int i;
if (patterns->head == NULL)
return; /* nothing to do */
query = createPQExpBuffer();
/*
* this might sometimes result in duplicate entries in the OID list, but
* we don't care.
*/
for (cell = patterns->head; cell; cell = cell->next)
{
/*
* Query must remain ABSOLUTELY devoid of unqualified names. This
* would be unnecessary given a pg_table_is_visible() variant taking a
* search_path argument.
*/
appendPQExpBuffer(query,
"SELECT c.oid"
"\nFROM pg_catalog.pg_class c"
"\n LEFT JOIN pg_catalog.pg_namespace n"
"\n ON n.oid OPERATOR(pg_catalog.=) c.relnamespace"
"\nWHERE c.relkind OPERATOR(pg_catalog.=) ANY"
"\n (array['%c', '%c', '%c', '%c', '%c', '%c'])\n",
RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW,
RELKIND_MATVIEW, RELKIND_FOREIGN_TABLE,
RELKIND_PARTITIONED_TABLE);
processSQLNamePattern(GetConnection(fout), query, cell->val, true,
false, "n.nspname", "c.relname", NULL,
"pg_catalog.pg_table_is_visible(c.oid)");
ExecuteSqlStatement(fout, "RESET search_path");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
PQclear(ExecuteSqlQueryForSingleRow(fout,
ALWAYS_SECURE_SEARCH_PATH_SQL));
if (strict_names && PQntuples(res) == 0)
fatal("no matching tables were found for pattern \"%s\"", cell->val);
for (i = 0; i < PQntuples(res); i++)
{
simple_oid_list_append(oids, atooid(PQgetvalue(res, i, 0)));
}
PQclear(res);
resetPQExpBuffer(query);
}
destroyPQExpBuffer(query);
}
/*
* checkExtensionMembership
* Determine whether object is an extension member, and if so,
* record an appropriate dependency and set the object's dump flag.
*
* It's important to call this for each object that could be an extension
* member. Generally, we integrate this with determining the object's
* to-be-dumped-ness, since extension membership overrides other rules for that.
*
* Returns true if object is an extension member, else false.
*/
static bool
checkExtensionMembership(DumpableObject *dobj, Archive *fout)
{
ExtensionInfo *ext = findOwningExtension(dobj->catId);
if (ext == NULL)
return false;
dobj->ext_member = true;
/* Record dependency so that getDependencies needn't deal with that */
addObjectDependency(dobj, ext->dobj.dumpId);
/*
* In 9.6 and above, mark the member object to have any non-initial ACL,
* policies, and security labels dumped.
*
* Note that any initial ACLs (see pg_init_privs) will be removed when we
* extract the information about the object. We don't provide support for
* initial policies and security labels and it seems unlikely for those to
* ever exist, but we may have to revisit this later.
*
* Prior to 9.6, we do not include any extension member components.
*
* In binary upgrades, we still dump all components of the members
* individually, since the idea is to exactly reproduce the database
* contents rather than replace the extension contents with something
* different.
*/
if (fout->dopt->binary_upgrade)
dobj->dump = ext->dobj.dump;
else
{
if (fout->remoteVersion < 90600)
dobj->dump = DUMP_COMPONENT_NONE;
else
dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
DUMP_COMPONENT_SECLABEL |
DUMP_COMPONENT_POLICY);
}
return true;
}
/*
* selectDumpableNamespace: policy-setting subroutine
* Mark a namespace as to be dumped or not
*/
static void
selectDumpableNamespace(NamespaceInfo *nsinfo, Archive *fout)
{
/*
* If specific tables are being dumped, do not dump any complete
* namespaces. If specific namespaces are being dumped, dump just those
* namespaces. Otherwise, dump all non-system namespaces.
*/
if (table_include_oids.head != NULL)
nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
else if (schema_include_oids.head != NULL)
nsinfo->dobj.dump_contains = nsinfo->dobj.dump =
simple_oid_list_member(&schema_include_oids,
nsinfo->dobj.catId.oid) ?
DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
else if (fout->remoteVersion >= 90600 &&
strcmp(nsinfo->dobj.name, "pg_catalog") == 0)
{
/*
* In 9.6 and above, we dump out any ACLs defined in pg_catalog, if
* they are interesting (and not the original ACLs which were set at
* initdb time, see pg_init_privs).
*/
nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
}
else if (strncmp(nsinfo->dobj.name, "pg_", 3) == 0 ||
strcmp(nsinfo->dobj.name, "information_schema") == 0)
{
/* Other system schemas don't get dumped */
nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
}
else if (strcmp(nsinfo->dobj.name, "public") == 0)
{
/*
* The public schema is a strange beast that sits in a sort of
* no-mans-land between being a system object and a user object. We
* don't want to dump creation or comment commands for it, because
* that complicates matters for non-superuser use of pg_dump. But we
* should dump any ACL changes that have occurred for it, and of
* course we should dump contained objects.
*/
nsinfo->dobj.dump = DUMP_COMPONENT_ACL;
nsinfo->dobj.dump_contains = DUMP_COMPONENT_ALL;
}
else
nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_ALL;
/*
* In any case, a namespace can be excluded by an exclusion switch
*/
if (nsinfo->dobj.dump_contains &&
simple_oid_list_member(&schema_exclude_oids,
nsinfo->dobj.catId.oid))
nsinfo->dobj.dump_contains = nsinfo->dobj.dump = DUMP_COMPONENT_NONE;
/*
* If the schema belongs to an extension, allow extension membership to
* override the dump decision for the schema itself. However, this does
* not change dump_contains, so this won't change what we do with objects
* within the schema. (If they belong to the extension, they'll get
* suppressed by it, otherwise not.)
*/
(void) checkExtensionMembership(&nsinfo->dobj, fout);
}
/*
* selectDumpableTable: policy-setting subroutine
* Mark a table as to be dumped or not
*/
static void
selectDumpableTable(TableInfo *tbinfo, Archive *fout)
{
if (checkExtensionMembership(&tbinfo->dobj, fout))
return; /* extension membership overrides all else */
/*
* If specific tables are being dumped, dump just those tables; else, dump
* according to the parent namespace's dump flag.
*/
if (table_include_oids.head != NULL)
tbinfo->dobj.dump = simple_oid_list_member(&table_include_oids,
tbinfo->dobj.catId.oid) ?
DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
else
tbinfo->dobj.dump = tbinfo->dobj.namespace->dobj.dump_contains;
/*
* In any case, a table can be excluded by an exclusion switch
*/
if (tbinfo->dobj.dump &&
simple_oid_list_member(&table_exclude_oids,
tbinfo->dobj.catId.oid))
tbinfo->dobj.dump = DUMP_COMPONENT_NONE;
}
/*
* selectDumpableType: policy-setting subroutine
* Mark a type as to be dumped or not
*
* If it's a table's rowtype or an autogenerated array type, we also apply a
* special type code to facilitate sorting into the desired order. (We don't
* want to consider those to be ordinary types because that would bring tables
* up into the datatype part of the dump order.) We still set the object's
* dump flag; that's not going to cause the dummy type to be dumped, but we
* need it so that casts involving such types will be dumped correctly -- see
* dumpCast. This means the flag should be set the same as for the underlying
* object (the table or base type).
*/
static void
selectDumpableType(TypeInfo *tyinfo, Archive *fout)
{
/* skip complex types, except for standalone composite types */
if (OidIsValid(tyinfo->typrelid) &&
tyinfo->typrelkind != RELKIND_COMPOSITE_TYPE)
{
TableInfo *tytable = findTableByOid(tyinfo->typrelid);
tyinfo->dobj.objType = DO_DUMMY_TYPE;
if (tytable != NULL)
tyinfo->dobj.dump = tytable->dobj.dump;
else
tyinfo->dobj.dump = DUMP_COMPONENT_NONE;
return;
}
/* skip auto-generated array types */
if (tyinfo->isArray)
{
tyinfo->dobj.objType = DO_DUMMY_TYPE;
/*
* Fall through to set the dump flag; we assume that the subsequent
* rules will do the same thing as they would for the array's base
* type. (We cannot reliably look up the base type here, since
* getTypes may not have processed it yet.)
*/
}
if (checkExtensionMembership(&tyinfo->dobj, fout))
return; /* extension membership overrides all else */
/* Dump based on if the contents of the namespace are being dumped */
tyinfo->dobj.dump = tyinfo->dobj.namespace->dobj.dump_contains;
}
/*
* selectDumpableDefaultACL: policy-setting subroutine
* Mark a default ACL as to be dumped or not
*
* For per-schema default ACLs, dump if the schema is to be dumped.
* Otherwise dump if we are dumping "everything". Note that dataOnly
* and aclsSkip are checked separately.
*/
static void
selectDumpableDefaultACL(DefaultACLInfo *dinfo, DumpOptions *dopt)
{
/* Default ACLs can't be extension members */
if (dinfo->dobj.namespace)
/* default ACLs are considered part of the namespace */
dinfo->dobj.dump = dinfo->dobj.namespace->dobj.dump_contains;
else
dinfo->dobj.dump = dopt->include_everything ?
DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
}
/*
* selectDumpableCast: policy-setting subroutine
* Mark a cast as to be dumped or not
*
* Casts do not belong to any particular namespace (since they haven't got
* names), nor do they have identifiable owners. To distinguish user-defined
* casts from built-in ones, we must resort to checking whether the cast's
* OID is in the range reserved for initdb.
*/
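/*
* Concretely (hedged note, not in the original source): on 8.1 and later
* g_last_builtin_oid is FirstNormalObjectId - 1, i.e. 16383, so a cast
* with OID 16384 or above is treated as user-defined here.
*/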
static void
selectDumpableCast(CastInfo *cast, Archive *fout)
{
if (checkExtensionMembership(&cast->dobj, fout))
return; /* extension membership overrides all else */
/*
* This would be DUMP_COMPONENT_ACL for from-initdb casts, but they do not
* support ACLs currently.
*/
if (cast->dobj.catId.oid <= (Oid) g_last_builtin_oid)
cast->dobj.dump = DUMP_COMPONENT_NONE;
else
cast->dobj.dump = fout->dopt->include_everything ?
DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
}
/*
* selectDumpableProcLang: policy-setting subroutine
* Mark a procedural language as to be dumped or not
*
* Procedural languages do not belong to any particular namespace. To
* identify built-in languages, we must resort to checking whether the
* language's OID is in the range reserved for initdb.
*/
static void
selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
{
if (checkExtensionMembership(&plang->dobj, fout))
return; /* extension membership overrides all else */
/*
* Only include procedural languages when we are dumping everything.
*
* For from-initdb procedural languages, only include ACLs, as we do for
* the pg_catalog namespace. We need this because procedural languages do
* not live in any namespace.
*/
if (!fout->dopt->include_everything)
plang->dobj.dump = DUMP_COMPONENT_NONE;
else
{
if (plang->dobj.catId.oid <= (Oid) g_last_builtin_oid)
plang->dobj.dump = fout->remoteVersion < 90600 ?
DUMP_COMPONENT_NONE : DUMP_COMPONENT_ACL;
else
plang->dobj.dump = DUMP_COMPONENT_ALL;
}
}
/*
* selectDumpableAccessMethod: policy-setting subroutine
* Mark an access method as to be dumped or not
*
* Access methods do not belong to any particular namespace. To identify
* built-in access methods, we must resort to checking whether the
* method's OID is in the range reserved for initdb.
*/
static void
selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
{
if (checkExtensionMembership(&method->dobj, fout))
return; /* extension membership overrides all else */
/*
* This would be DUMP_COMPONENT_ACL for from-initdb access methods, but
* they do not support ACLs currently.
*/
if (method->dobj.catId.oid <= (Oid) g_last_builtin_oid)
method->dobj.dump = DUMP_COMPONENT_NONE;
else
method->dobj.dump = fout->dopt->include_everything ?
DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
}
/*
* selectDumpableExtension: policy-setting subroutine
* Mark an extension as to be dumped or not
*
* Built-in extensions should be skipped except for checking ACLs, since we
* assume those will already be installed in the target database. We identify
* such extensions by their having OIDs in the range reserved for initdb.
* We dump all user-added extensions by default, or none of them if
* include_everything is false (i.e., a --schema or --table switch was given).
*/
static void
selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
{
/*
* Use DUMP_COMPONENT_ACL for built-in extensions, to allow users to
* change permissions on their member objects, if they wish to, and have
* those changes preserved.
*/
if (extinfo->dobj.catId.oid <= (Oid) g_last_builtin_oid)
extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
else
extinfo->dobj.dump = extinfo->dobj.dump_contains =
dopt->include_everything ? DUMP_COMPONENT_ALL :
DUMP_COMPONENT_NONE;
}
/*
* selectDumpablePublicationTable: policy-setting subroutine
* Mark a publication table as to be dumped or not
*
* Publication tables have schemas, but those are ignored in decision making,
* because publications are only dumped when we are dumping everything.
*/
static void
selectDumpablePublicationTable(DumpableObject *dobj, Archive *fout)
{
if (checkExtensionMembership(dobj, fout))
return; /* extension membership overrides all else */
dobj->dump = fout->dopt->include_everything ?
DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
}
/*
* selectDumpableObject: policy-setting subroutine
* Mark a generic dumpable object as to be dumped or not
*
* Use this only for object types without a special-case routine above.
*/
static void
selectDumpableObject(DumpableObject *dobj, Archive *fout)
{
if (checkExtensionMembership(dobj, fout))
return; /* extension membership overrides all else */
/*
* Default policy is to dump if parent namespace is dumpable, or for
* non-namespace-associated items, dump if we're dumping "everything".
*/
if (dobj->namespace)
dobj->dump = dobj->namespace->dobj.dump_contains;
else
dobj->dump = fout->dopt->include_everything ?
DUMP_COMPONENT_ALL : DUMP_COMPONENT_NONE;
}
/*
* Dump a table's contents for loading using the COPY command
* - this routine is called by the Archiver when it wants the table
* to be dumped.
*/
static int
dumpTableData_copy(Archive *fout, void *dcontext)
{
TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
TableInfo *tbinfo = tdinfo->tdtable;
const char *classname = tbinfo->dobj.name;
PQExpBuffer q = createPQExpBuffer();
/*
* Note: can't use getThreadLocalPQExpBuffer() here, we're calling fmtId
* which uses it already.
*/
PQExpBuffer clistBuf = createPQExpBuffer();
PGconn *conn = GetConnection(fout);
PGresult *res;
int ret;
char *copybuf;
const char *column_list;
pg_log_info("dumping contents of table \"%s.%s\"",
tbinfo->dobj.namespace->dobj.name, classname);
/*
* Specify the column list explicitly so that we have no possibility of
* retrieving data in the wrong column order. (The default column
* ordering of COPY will not be what we want in certain corner cases
* involving ADD COLUMN and inheritance.)
*/
column_list = fmtCopyColumnList(tbinfo, clistBuf);
if (tdinfo->filtercond)
{
/* Note: this syntax is only supported in 8.2 and up */
appendPQExpBufferStr(q, "COPY (SELECT ");
/* klugery to get rid of parens in column list */
if (strlen(column_list) > 2)
{
appendPQExpBufferStr(q, column_list + 1);
q->data[q->len - 1] = ' ';
}
else
appendPQExpBufferStr(q, "* ");
appendPQExpBuffer(q, "FROM %s %s) TO stdout;",
fmtQualifiedDumpable(tbinfo),
tdinfo->filtercond);
}
else
{
appendPQExpBuffer(q, "COPY %s %s TO stdout;",
fmtQualifiedDumpable(tbinfo),
column_list);
}
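	/*
	 * For a hypothetical table public.t with columns a and b, the command
	 * built above looks like
	 *		COPY public.t (a, b) TO stdout;
	 * or, when a filter condition such as "WHERE a > 0" is attached,
	 *		COPY (SELECT a, b FROM public.t WHERE a > 0) TO stdout;
	 */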
res = ExecuteSqlQuery(fout, q->data, PGRES_COPY_OUT);
PQclear(res);
destroyPQExpBuffer(clistBuf);
for (;;)
{
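		/*
		 * PQgetCopyData returns the length of the row just fetched, -1
		 * once the COPY is complete, or -2 on error; the final argument of
		 * 0 selects blocking mode.
		 */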
ret = PQgetCopyData(conn, &copybuf, 0);
if (ret < 0)
break; /* done or error */
if (copybuf)
{
WriteData(fout, copybuf, ret);
PQfreemem(copybuf);
}
/* ----------
* THROTTLE:
*
* There was considerable discussion in late July, 2000 regarding
* slowing down pg_dump when backing up large tables. Users with both
* slow & fast (multi-processor) machines experienced performance
* degradation when doing a backup.
*
* Initial attempts based on sleeping for a number of ms for each ms
* of work were deemed too complex, then a simple 'sleep in each loop'
* implementation was suggested. The latter failed because the loop
* was too tight. Finally, the following was implemented:
*
* If throttle is non-zero, then
* See how long since the last sleep.
* Work out how long to sleep (based on ratio).
* If sleep is more than 100ms, then
* sleep
* reset timer
* EndIf
* EndIf
*
* where the throttle value was the number of ms to sleep per ms of
* work. The calculation was done in each loop.
*
* Most of the hard work is done in the backend, and this solution
* still did not work particularly well: on slow machines, the ratio
		 * was 50:1, and on medium-paced machines, 1:1, and on fast
* multi-processor machines, it had little or no effect, for reasons
* that were unclear.
*
* Further discussion ensued, and the proposal was dropped.
*
* For those people who want this feature, it can be implemented using
* gettimeofday in each loop, calculating the time since last sleep,
* multiplying that by the sleep ratio, then if the result is more
* than a preset 'minimum sleep time' (say 100ms), call the 'select'
		 * function to sleep for a subsecond period, i.e.:
*
* select(0, NULL, NULL, NULL, &tvi);
*
* This will return after the interval specified in the structure tvi.
* Finally, call gettimeofday again to save the 'last sleep time'.
* ----------
*/
}
archprintf(fout, "\\.\n\n\n");
if (ret == -2)
{
/* copy data transfer failed */
pg_log_error("Dumping the contents of table \"%s\" failed: PQgetCopyData() failed.", classname);
pg_log_error("Error message from server: %s", PQerrorMessage(conn));
pg_log_error("The command was: %s", q->data);
exit_nicely(1);
}
/* Check command status and return to normal libpq state */
res = PQgetResult(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
{
pg_log_error("Dumping the contents of table \"%s\" failed: PQgetResult() failed.", classname);
pg_log_error("Error message from server: %s", PQerrorMessage(conn));
pg_log_error("The command was: %s", q->data);
exit_nicely(1);
}
PQclear(res);
/* Do this to ensure we've pumped libpq back to idle state */
if (PQgetResult(conn) != NULL)
pg_log_warning("unexpected extra results during COPY of table \"%s\"",
classname);
destroyPQExpBuffer(q);
return 1;
}
/*
* Dump table data using INSERT commands.
*
* Caution: when we restore from an archive file direct to database, the
* INSERT commands emitted by this function have to be parsed by
* pg_backup_db.c's ExecuteSimpleCommands(), which will not handle comments,
* E'' strings, or dollar-quoted strings. So don't emit anything like that.
*/
static int
dumpTableData_insert(Archive *fout, void *dcontext)
{
TableDataInfo *tdinfo = (TableDataInfo *) dcontext;
TableInfo *tbinfo = tdinfo->tdtable;
DumpOptions *dopt = fout->dopt;
PQExpBuffer q = createPQExpBuffer();
PQExpBuffer insertStmt = NULL;
PGresult *res;
int nfields;
int rows_per_statement = dopt->dump_inserts;
int rows_this_statement = 0;
appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
"SELECT * FROM ONLY %s",
fmtQualifiedDumpable(tbinfo));
if (tdinfo->filtercond)
appendPQExpBuffer(q, " %s", tdinfo->filtercond);
ExecuteSqlStatement(fout, q->data);
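	/*
	 * For a hypothetical table public.t, the statement above reads
	 *		DECLARE _pg_dump_cursor CURSOR FOR SELECT * FROM ONLY public.t
	 * (plus any filter condition); rows are then pulled from the cursor
	 * 100 at a time to bound memory usage.
	 */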
while (1)
{
res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
PGRES_TUPLES_OK);
nfields = PQnfields(res);
/*
* First time through, we build as much of the INSERT statement as
* possible in "insertStmt", which we can then just print for each
* statement. If the table happens to have zero columns then this will
* be a complete statement, otherwise it will end in "VALUES" and be
* ready to have the row's column values printed.
*/
if (insertStmt == NULL)
{
TableInfo *targettab;
insertStmt = createPQExpBuffer();
/*
* When load-via-partition-root is set, get the root table name
* for the partition table, so that we can reload data through the
* root table.
*/
if (dopt->load_via_partition_root && tbinfo->ispartition)
targettab = getRootTableInfo(tbinfo);
else
targettab = tbinfo;
appendPQExpBuffer(insertStmt, "INSERT INTO %s ",
fmtQualifiedDumpable(targettab));
/* corner case for zero-column table */
if (nfields == 0)
{
appendPQExpBufferStr(insertStmt, "DEFAULT VALUES;\n");
}
else
{
/* append the list of column names if required */
if (dopt->column_inserts)
{
appendPQExpBufferChar(insertStmt, '(');
for (int field = 0; field < nfields; field++)
{
if (field > 0)
appendPQExpBufferStr(insertStmt, ", ");
appendPQExpBufferStr(insertStmt,
fmtId(PQfname(res, field)));
}
appendPQExpBufferStr(insertStmt, ") ");
}
if (tbinfo->needs_override)
appendPQExpBufferStr(insertStmt, "OVERRIDING SYSTEM VALUE ");
appendPQExpBufferStr(insertStmt, "VALUES");
}
}
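		/*
		 * At this point insertStmt holds, for a hypothetical table
		 * public.t dumped with --column-inserts, something like
		 *		INSERT INTO public.t (a, b) VALUES
		 * possibly with OVERRIDING SYSTEM VALUE before VALUES, or the
		 * complete statement
		 *		INSERT INTO public.t DEFAULT VALUES;
		 * in the zero-column case.
		 */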
for (int tuple = 0; tuple < PQntuples(res); tuple++)
{
/* Write the INSERT if not in the middle of a multi-row INSERT. */
if (rows_this_statement == 0)
archputs(insertStmt->data, fout);
/*
			 * If it is a zero-column table then we've already written the
			 * complete statement, which means we've disobeyed
			 * --rows-per-insert when it's set greater than 1.  We could
			 * make this multi-row with "SELECT UNION ALL SELECT UNION ALL
			 * ...", but that's non-standard, so we avoid it given that
			 * INSERTs are mostly only needed for cross-database exports.
*/
if (nfields == 0)
continue;
/* Emit a row heading */
if (rows_per_statement == 1)
archputs(" (", fout);
else if (rows_this_statement > 0)
archputs(",\n\t(", fout);
else
archputs("\n\t(", fout);
for (int field = 0; field < nfields; field++)
{
if (field > 0)
archputs(", ", fout);
if (tbinfo->attgenerated[field])
{
archputs("DEFAULT", fout);
continue;
}
if (PQgetisnull(res, tuple, field))
{
archputs("NULL", fout);
continue;
}
/* XXX This code is partially duplicated in ruleutils.c */
switch (PQftype(res, field))
{
case INT2OID:
case INT4OID:
case INT8OID:
case OIDOID:
case FLOAT4OID:
case FLOAT8OID:
case NUMERICOID:
{
/*
* These types are printed without quotes unless
* they contain values that aren't accepted by the
* scanner unquoted (e.g., 'NaN'). Note that
* strtod() and friends might accept NaN, so we
* can't use that to test.
*
* In reality we only need to defend against
* infinity and NaN, so we need not get too crazy
* about pattern matching here.
*/
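							/*
							 * E.g., "42" and "-1.5e+20" pass the strspn()
							 * test below and are emitted bare, while "NaN",
							 * "Infinity" and "-Infinity" get single-quoted.
							 */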
const char *s = PQgetvalue(res, tuple, field);
if (strspn(s, "0123456789 +-eE.") == strlen(s))
archputs(s, fout);
else
archprintf(fout, "'%s'", s);
}
break;
case BITOID:
case VARBITOID:
archprintf(fout, "B'%s'",
PQgetvalue(res, tuple, field));
break;
case BOOLOID:
if (strcmp(PQgetvalue(res, tuple, field), "t") == 0)
archputs("true", fout);
else
archputs("false", fout);
break;
default:
/* All other types are printed as string literals. */
resetPQExpBuffer(q);
appendStringLiteralAH(q,
PQgetvalue(res, tuple, field),
fout);
archputs(q->data, fout);
break;
}
}
/* Terminate the row ... */
archputs(")", fout);
/* ... and the statement, if the target no. of rows is reached */
if (++rows_this_statement >= rows_per_statement)
{
if (dopt->do_nothing)
archputs(" ON CONFLICT DO NOTHING;\n", fout);
else
archputs(";\n", fout);
/* Reset the row counter */
rows_this_statement = 0;
}
}
if (PQntuples(res) <= 0)
{
PQclear(res);
break;
}
PQclear(res);
}
/* Terminate any statements that didn't make the row count. */
if (rows_this_statement > 0)
{
if (dopt->do_nothing)
archputs(" ON CONFLICT DO NOTHING;\n", fout);
else
archputs(";\n", fout);
}
archputs("\n\n", fout);
ExecuteSqlStatement(fout, "CLOSE _pg_dump_cursor");
destroyPQExpBuffer(q);
if (insertStmt != NULL)
destroyPQExpBuffer(insertStmt);
return 1;
}
/*
* getRootTableInfo:
* get the root TableInfo for the given partition table.
*/
static TableInfo *
getRootTableInfo(TableInfo *tbinfo)
{
TableInfo *parentTbinfo;
Assert(tbinfo->ispartition);
Assert(tbinfo->numParents == 1);
parentTbinfo = tbinfo->parents[0];
while (parentTbinfo->ispartition)
{
Assert(parentTbinfo->numParents == 1);
parentTbinfo = parentTbinfo->parents[0];
}
return parentTbinfo;
}
/*
* dumpTableData -
* dump the contents of a single table
*
* Actually, this just makes an ArchiveEntry for the table contents.
*/
static void
dumpTableData(Archive *fout, TableDataInfo *tdinfo)
{
DumpOptions *dopt = fout->dopt;
TableInfo *tbinfo = tdinfo->tdtable;
PQExpBuffer copyBuf = createPQExpBuffer();
PQExpBuffer clistBuf = createPQExpBuffer();
DataDumperPtr dumpFn;
char *copyStmt;
const char *copyFrom;
if (!dopt->dump_inserts)
{
/* Dump/restore using COPY */
dumpFn = dumpTableData_copy;
/*
* When load-via-partition-root is set, get the root table name for
* the partition table, so that we can reload data through the root
* table.
*/
if (dopt->load_via_partition_root && tbinfo->ispartition)
{
TableInfo *parentTbinfo;
parentTbinfo = getRootTableInfo(tbinfo);
copyFrom = fmtQualifiedDumpable(parentTbinfo);
}
else
copyFrom = fmtQualifiedDumpable(tbinfo);
		/* must use two steps here because fmtId() is nonreentrant */
appendPQExpBuffer(copyBuf, "COPY %s ",
copyFrom);
appendPQExpBuffer(copyBuf, "%s FROM stdin;\n",
fmtCopyColumnList(tbinfo, clistBuf));
copyStmt = copyBuf->data;
}
else
{
/* Restore using INSERT */
dumpFn = dumpTableData_insert;
copyStmt = NULL;
}
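	/*
	 * The COPY statement built above goes into the archive's TOC entry, so
	 * that restore knows how to initiate the data transfer; in the INSERT
	 * case there is no such statement, and the dumper callback emits
	 * self-contained INSERT commands instead.
	 */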
/*
* Note: although the TableDataInfo is a full DumpableObject, we treat its
* dependency on its table as "special" and pass it to ArchiveEntry now.
* See comments for BuildArchiveDependencies.
*/
if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
{
TocEntry *te;
te = ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId,
ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
.namespace = tbinfo->dobj.namespace->dobj.name,
.owner = tbinfo->rolname,
.description = "TABLE DATA",
.section = SECTION_DATA,
.copyStmt = copyStmt,
.deps = &(tbinfo->dobj.dumpId),
.nDeps = 1,
.dumpFn = dumpFn,
.dumpArg = tdinfo));
/*
* Set the TocEntry's dataLength in case we are doing a parallel dump
* and want to order dump jobs by table size. We choose to measure
* dataLength in table pages during dump, so no scaling is needed.
* However, relpages is declared as "integer" in pg_class, and hence
* also in TableInfo, but it's really BlockNumber a/k/a unsigned int.
* Cast so that we get the right interpretation of table sizes
* exceeding INT_MAX pages.
*/
te->dataLength = (BlockNumber) tbinfo->relpages;
}
destroyPQExpBuffer(copyBuf);
destroyPQExpBuffer(clistBuf);
}
/*
* refreshMatViewData -
* load or refresh the contents of a single materialized view
*
* Actually, this just makes an ArchiveEntry for the REFRESH MATERIALIZED VIEW
* statement.
*/
static void
refreshMatViewData(Archive *fout, TableDataInfo *tdinfo)
{
TableInfo *tbinfo = tdinfo->tdtable;
PQExpBuffer q;
/* If the materialized view is not flagged as populated, skip this. */
if (!tbinfo->relispopulated)
return;
q = createPQExpBuffer();
appendPQExpBuffer(q, "REFRESH MATERIALIZED VIEW %s;\n",
fmtQualifiedDumpable(tbinfo));
if (tdinfo->dobj.dump & DUMP_COMPONENT_DATA)
ArchiveEntry(fout,
tdinfo->dobj.catId, /* catalog ID */
tdinfo->dobj.dumpId, /* dump ID */
ARCHIVE_OPTS(.tag = tbinfo->dobj.name,
.namespace = tbinfo->dobj.namespace->dobj.name,
.owner = tbinfo->rolname,
.description = "MATERIALIZED VIEW DATA",
.section = SECTION_POST_DATA,
.createStmt = q->data,
.deps = tdinfo->dobj.dependencies,
.nDeps = tdinfo->dobj.nDeps));
destroyPQExpBuffer(q);
}
/*
* getTableData -
* set up dumpable objects representing the contents of tables
*/
static void
getTableData(DumpOptions *dopt, TableInfo *tblinfo, int numTables, char relkind)
{
int i;
for (i = 0; i < numTables; i++)
{
if (tblinfo[i].dobj.dump & DUMP_COMPONENT_DATA &&
(!relkind || tblinfo[i].relkind == relkind))
makeTableDataInfo(dopt, &(tblinfo[i]));
}
}
/*
* Make a dumpable object for the data of this specific table
*
* Note: we make a TableDataInfo if and only if we are going to dump the
* table data; the "dump" flag in such objects isn't used.
*/
static void
makeTableDataInfo(DumpOptions *dopt, TableInfo *tbinfo)
{
TableDataInfo *tdinfo;
/*
	 * Nothing to do if we already made a TableDataInfo for this table;
	 * that happens for extension "config" tables.
*/
if (tbinfo->dataObj != NULL)
return;
/* Skip VIEWs (no data to dump) */
if (tbinfo->relkind == RELKIND_VIEW)
return;
/* Skip FOREIGN TABLEs (no data to dump) */
if (tbinfo->relkind == RELKIND_FOREIGN_TABLE)
return;
/* Skip partitioned tables (data in partitions) */
if (tbinfo->relkind == RELKIND_PARTITIONED_TABLE)
return;
/* Don't dump data in unlogged tables, if so requested */
if (tbinfo->relpersistence == RELPERSISTENCE_UNLOGGED &&
dopt->no_unlogged_table_data)
return;
/* Check that the data is not explicitly excluded */
if (simple_oid_list_member(&tabledata_exclude_oids,
tbinfo->dobj.catId.oid))
return;
/* OK, let's dump it */
tdinfo = (TableDataInfo *) pg_malloc(sizeof(TableDataInfo));
if (tbinfo->relkind == RELKIND_MATVIEW)
tdinfo->dobj.objType = DO_REFRESH_MATVIEW;
else if (tbinfo->relkind == RELKIND_SEQUENCE)
tdinfo->dobj.objType = DO_SEQUENCE_SET;
else
tdinfo->dobj.objType = DO_TABLE_DATA;
/*
* Note: use tableoid 0 so that this object won't be mistaken for
* something that pg_depend entries apply to.
*/
tdinfo->dobj.catId.tableoid = 0;
tdinfo->dobj.catId.oid = tbinfo->dobj.catId.oid;
AssignDumpId(&tdinfo->dobj);
tdinfo->dobj.name = tbinfo->dobj.name;
tdinfo->dobj.namespace = tbinfo->dobj.namespace;
tdinfo->tdtable = tbinfo;
tdinfo->filtercond = NULL; /* might get set later */
addObjectDependency(&tdinfo->dobj, tbinfo->dobj.dumpId);
tbinfo->dataObj = tdinfo;
}
/*
* The refresh for a materialized view must be dependent on the refresh for
* any materialized view that this one is dependent on.
*
* This must be called after all the objects are created, but before they are
* sorted.
*/
static void
buildMatViewRefreshDependencies(Archive *fout)
{
PQExpBuffer query;
PGresult *res;
int ntups,
i;
int i_classid,
i_objid,
i_refobjid;
/* No Mat Views before 9.3. */
if (fout->remoteVersion < 90300)
return;
query = createPQExpBuffer();
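	/*
	 * The recursive CTE below starts from each matview's rewrite rule and
	 * walks pg_depend to find every view or matview the rule reads from,
	 * continuing through intervening plain views; the outer SELECT then
	 * keeps only the matview endpoints, which are the pairs that need
	 * refresh-ordering dependencies.
	 */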
appendPQExpBufferStr(query, "WITH RECURSIVE w AS "
"( "
"SELECT d1.objid, d2.refobjid, c2.relkind AS refrelkind "
"FROM pg_depend d1 "
"JOIN pg_class c1 ON c1.oid = d1.objid "
"AND c1.relkind = " CppAsString2(RELKIND_MATVIEW)
" JOIN pg_rewrite r1 ON r1.ev_class = d1.objid "
"JOIN pg_depend d2 ON d2.classid = 'pg_rewrite'::regclass "
"AND d2.objid = r1.oid "
"AND d2.refobjid <> d1.objid "
"JOIN pg_class c2 ON c2.oid = d2.refobjid "
"AND c2.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
CppAsString2(RELKIND_VIEW) ") "
"WHERE d1.classid = 'pg_class'::regclass "
"UNION "
"SELECT w.objid, d3.refobjid, c3.relkind "
"FROM w "
"JOIN pg_rewrite r3 ON r3.ev_class = w.refobjid "
"JOIN pg_depend d3 ON d3.classid = 'pg_rewrite'::regclass "
"AND d3.objid = r3.oid "
"AND d3.refobjid <> w.refobjid "
"JOIN pg_class c3 ON c3.oid = d3.refobjid "
"AND c3.relkind IN (" CppAsString2(RELKIND_MATVIEW) ","
CppAsString2(RELKIND_VIEW) ") "
") "
"SELECT 'pg_class'::regclass::oid AS classid, objid, refobjid "
"FROM w "
"WHERE refrelkind = " CppAsString2(RELKIND_MATVIEW));
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
ntups = PQntuples(res);
i_classid = PQfnumber(res, "classid");
i_objid = PQfnumber(res, "objid");
i_refobjid = PQfnumber(res, "refobjid");
for (i = 0; i < ntups; i++)
{
CatalogId objId;
CatalogId refobjId;
DumpableObject *dobj;
DumpableObject *refdobj;
TableInfo *tbinfo;
TableInfo *reftbinfo;
objId.tableoid = atooid(PQgetvalue(res, i, i_classid));
objId.oid = atooid(PQgetvalue(res, i, i_objid));
refobjId.tableoid = objId.tableoid;
refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid));
dobj = findObjectByCatalogId(objId);
if (dobj == NULL)
continue;
Assert(dobj->objType == DO_TABLE);
tbinfo = (TableInfo *) dobj;
Assert(tbinfo->relkind == RELKIND_MATVIEW);
dobj = (DumpableObject *) tbinfo->dataObj;
if (dobj == NULL)
continue;
Assert(dobj->objType == DO_REFRESH_MATVIEW);
refdobj = findObjectByCatalogId(refobjId);
if (refdobj == NULL)
continue;
Assert(refdobj->objType == DO_TABLE);
reftbinfo = (TableInfo *) refdobj;
Assert(reftbinfo->relkind == RELKIND_MATVIEW);
refdobj = (DumpableObject *) reftbinfo->dataObj;
if (refdobj == NULL)
continue;
Assert(refdobj->objType == DO_REFRESH_MATVIEW);
addObjectDependency(dobj, refdobj->dumpId);
if (!reftbinfo->relispopulated)
tbinfo->relispopulated = false;
}
PQclear(res);
destroyPQExpBuffer(query);
}
/*
* getTableDataFKConstraints -
* add dump-order dependencies reflecting foreign key constraints
*
* This code is executed only in a data-only dump --- in schema+data dumps
* we handle foreign key issues by not creating the FK constraints until
* after the data is loaded. In a data-only dump, however, we want to
* order the table data objects in such a way that a table's referenced
* tables are restored first. (In the presence of circular references or
* self-references this may be impossible; we'll detect and complain about
* that during the dependency sorting step.)
*/
static void
getTableDataFKConstraints(void)
{
DumpableObject **dobjs;
int numObjs;
int i;
/* Search through all the dumpable objects for FK constraints */
getDumpableObjects(&dobjs, &numObjs);
for (i = 0; i < numObjs; i++)
{
if (dobjs[i]->objType == DO_FK_CONSTRAINT)
{
ConstraintInfo *cinfo = (ConstraintInfo *) dobjs[i];
TableInfo *ftable;
/* Not interesting unless both tables are to be dumped */
if (cinfo->contable == NULL ||
cinfo->contable->dataObj == NULL)
continue;
ftable = findTableByOid(cinfo->confrelid);
if (ftable == NULL ||
ftable->dataObj == NULL)
continue;
/*
* Okay, make referencing table's TABLE_DATA object depend on the
* referenced table's TABLE_DATA object.
*/
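			/*
			 * E.g., if hypothetical table orders has an FK referencing
			 * customers, the orders TABLE DATA entry now depends on the
			 * customers TABLE DATA entry, so customers' rows are restored
			 * first.
			 */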
addObjectDependency(&cinfo->contable->dataObj->dobj,
ftable->dataObj->dobj.dumpId);
}
}
free(dobjs);
}
/*
* guessConstraintInheritance:
* In pre-8.4 databases, we can't tell for certain which constraints
* are inherited. We assume a CHECK constraint is inherited if its name
* matches the name of any constraint in the parent. Originally this code
* tried to compare the expression texts, but that can fail for various
* reasons --- for example, if the parent and child tables are in different
* schemas, reverse-listing of function calls may produce different text
* (schema-qualified or not) depending on search path.
*
* In 8.4 and up we can rely on the conislocal field to decide which
* constraints must be dumped; much safer.
*
* This function assumes all conislocal flags were initialized to true.
* It clears the flag on anything that seems to be inherited.
*/
static void
guessConstraintInheritance(TableInfo *tblinfo, int numTables)
{
int i,
j,
k;
for (i = 0; i < numTables; i++)
{
TableInfo *tbinfo = &(tblinfo[i]);
int numParents;
TableInfo **parents;
TableInfo *parent;
/* Sequences and views never have parents */
if (tbinfo->relkind == RELKIND_SEQUENCE ||
tbinfo->relkind == RELKIND_VIEW)
continue;
/* Don't bother computing anything for non-target tables, either */
if (!(tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION))
continue;