Add filtering capability for cross-version pg_upgrade tests

This commit expands the TAP tests of pg_upgrade when running them with
different major versions for the "old" cluster (to-be-upgraded) and the
"new" cluster (upgraded-to), by backporting some of the buildfarm's
facilities into the test script:
- Remove comments from the dump files, avoiding version-dependent
information.
- Remove empty lines from the dump files.
- Use --extra-float-digits=0 in the pg_dump command when the "old"
cluster is on v11 or older.
- Use --wal-segsize and --allow-group-access in initdb only when the
"old" cluster is on v11 or newer.

This allows the tests to pass down to v14 with the main regression test
suite, while v9.5~13 still generate some diffs, but these are minimal
compared to what happened before this commit.  Much more could be done,
especially around dump differences with functions and procedures (these
can also be avoided with direct manipulation of the dumps loaded, for
example, in a way similar to the buildfarm), but at least the basics are
in place now.

Reviewed-by: Justin Pryzby, Anton A. Melnikov
Discussion: https://postgr.es/m/Yox1ME99GhAemMq1@paquier.xyz
Michael Paquier 2022-10-04 10:11:08 +09:00
parent 908e17151b
commit 62be9e4cdc
1 changed file with 70 additions and 33 deletions


@@ -30,6 +30,27 @@ sub generate_db
"created database with ASCII characters from $from_char to $to_char");
}
# Filter the contents of a dump before its use in a content comparison.
# This returns the path to the filtered dump.
sub filter_dump
{
my ($node, $dump_file) = @_;
my $dump_contents = slurp_file($dump_file);
# Remove the comments.
$dump_contents =~ s/^\-\-.*//mgx;
# Remove empty lines.
$dump_contents =~ s/^\n//mgx;
my $dump_file_filtered = "${dump_file}_filtered";
open(my $dh, '>', $dump_file_filtered)
|| die "opening $dump_file_filtered";
print $dh $dump_contents;
close($dh);
return $dump_file_filtered;
}
# The test of pg_upgrade requires two clusters, an old one and a new one
# that gets upgraded. Before running the upgrade, a logical dump of the
# old cluster is taken, and a second logical dump of the new one is taken
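As a side note, the filtering added in the hunk above boils down to two line-anchored substitutions. Here is a minimal, standalone Perl sketch of the same idea that does not rely on the TAP helpers; the file name dump1.sql is hypothetical and only for illustration:

use strict;
use warnings;

# Drop SQL comment lines (which carry version-dependent information) and the
# blank lines they leave behind, mirroring filter_dump above.
sub filter_dump_text
{
	my ($dump_contents) = @_;
	$dump_contents =~ s/^--.*//mg;    # strip comment lines
	$dump_contents =~ s/^\n//mg;      # strip empty lines
	return $dump_contents;
}

my $dump_file = 'dump1.sql';          # hypothetical path
open my $in, '<', $dump_file or die "cannot open $dump_file: $!";
my $contents = do { local $/; <$in> };
close $in;

open my $out, '>', "${dump_file}_filtered"
  or die "cannot write ${dump_file}_filtered: $!";
print $out filter_dump_text($contents);
close $out;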
@@ -49,8 +70,10 @@ if ( (defined($ENV{olddump}) && !defined($ENV{oldinstall}))
die "olddump or oldinstall is undefined";
}
# Temporary location for the dumps taken
my $tempdir = PostgreSQL::Test::Utils::tempdir;
# Paths to the dumps taken during the tests.
my $tempdir = PostgreSQL::Test::Utils::tempdir;
my $dump1_file = "$tempdir/dump1.sql";
my $dump2_file = "$tempdir/dump2.sql";
# Initialize node to upgrade
my $oldnode =
@@ -60,7 +83,10 @@ my $oldnode =
# To increase coverage of non-standard segment size and group access without
# increasing test runtime, run these tests with a custom setting.
# --allow-group-access and --wal-segsize have been added in v11.
$oldnode->init(extra => [ '--wal-segsize', '1', '--allow-group-access' ]);
my %node_params = ();
$node_params{extra} = [ '--wal-segsize', '1', '--allow-group-access' ]
if $oldnode->pg_version >= 11;
$oldnode->init(%node_params);
$oldnode->start;
# The default location of the source code is the root of this directory.
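The pg_version >= 11 test in this hunk works because pg_version returns a PostgreSQL::Version object whose comparison operators are overloaded, so comparing against a bare major-version number behaves as expected. A small sketch, assuming src/test/perl is on the Perl include path as it is when the TAP tests run (the example versions are arbitrary):

use strict;
use warnings;
use PostgreSQL::Version;

# Overloaded comparisons let a version object be compared with a plain number.
my $old = PostgreSQL::Version->new('9.6.24');
print "pre-v11: skip --wal-segsize and --allow-group-access\n" if $old < 11;

my $new = PostgreSQL::Version->new('15.2');
print "v11 or newer: pass the extra initdb options\n" if $new >= 11;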
@@ -129,37 +155,38 @@ else
is($rc, 0, 'regression tests pass');
}
# Initialize a new node for the upgrade.
my $newnode = PostgreSQL::Test::Cluster->new('new_node');
$newnode->init(%node_params);
my $newbindir = $newnode->config_data('--bindir');
my $oldbindir = $oldnode->config_data('--bindir');
# Before dumping, get rid of objects not existing or not supported in later
# versions. This depends on the version of the old server used, and matters
# only if different major versions are used for the dump.
if (defined($ENV{oldinstall}))
{
# Note that upgrade_adapt.sql from the new version is used, to
# cope with an upgrade to this version.
$oldnode->command_ok(
# Note that upgrade_adapt.sql and psql from the new version are used,
# to cope with an upgrade to this version.
$newnode->command_ok(
[
'psql', '-X',
'-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
'regression'
'-f', "$srcdir/src/bin/pg_upgrade/upgrade_adapt.sql",
'-d', $oldnode->connstr('regression'),
],
'ran adapt script');
}
# Initialize a new node for the upgrade.
my $newnode = PostgreSQL::Test::Cluster->new('new_node');
$newnode->init(extra => [ '--wal-segsize', '1', '--allow-group-access' ]);
my $newbindir = $newnode->config_data('--bindir');
my $oldbindir = $oldnode->config_data('--bindir');
# Take a dump before performing the upgrade as a base comparison. Note
# that we need to use pg_dumpall from the new node here.
$newnode->command_ok(
[
'pg_dumpall', '--no-sync',
'-d', $oldnode->connstr('postgres'),
'-f', "$tempdir/dump1.sql"
],
'dump before running pg_upgrade');
my @dump_command = (
'pg_dumpall', '--no-sync', '-d', $oldnode->connstr('postgres'),
'-f', $dump1_file);
# --extra-float-digits is needed when upgrading from a version older than v12.
push(@dump_command, '--extra-float-digits', '0')
if ($oldnode->pg_version < 12);
$newnode->command_ok(\@dump_command, 'dump before running pg_upgrade');
# After dumping, update references to the old source tree's regress.so
# to point to the new tree.
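The same conditional dump command is rebuilt for the post-upgrade dump in the last hunk below. A hypothetical helper that factors out that repetition could look like the following sketch; build_dump_command, $dump_node, and the commented usage are illustrative only and not part of the patch:

# Build the pg_dumpall command once, appending --extra-float-digits=0 only
# when the server being dumped from is older than v12.  $dump_node is
# whichever node the dump is taken from; pg_dumpall itself always comes
# from the new installation.
sub build_dump_command
{
	my ($oldnode, $dump_node, $dump_file) = @_;
	my @cmd = (
		'pg_dumpall', '--no-sync',
		'-d', $dump_node->connstr('postgres'),
		'-f', $dump_file);
	push(@cmd, '--extra-float-digits', '0')
	  if $oldnode->pg_version < 12;
	return \@cmd;
}

# Usage mirroring the two call sites in this patch:
# $newnode->command_ok(build_dump_command($oldnode, $oldnode, $dump1_file),
#	'dump before running pg_upgrade');
# $newnode->command_ok(build_dump_command($oldnode, $newnode, $dump2_file),
#	'dump after running pg_upgrade');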
@@ -173,7 +200,7 @@ if (defined($ENV{oldinstall}))
chomp($output);
my @libpaths = split("\n", $output);
my $dump_data = slurp_file("$tempdir/dump1.sql");
my $dump_data = slurp_file($dump1_file);
my $newregresssrc = "$srcdir/src/test/regress";
foreach (@libpaths)
@@ -183,7 +210,7 @@ if (defined($ENV{oldinstall}))
$dump_data =~ s/$libpath/$newregresssrc/g;
}
open my $fh, ">", "$tempdir/dump1.sql" or die "could not open dump file";
open my $fh, ">", $dump1_file or die "could not open dump file";
print $fh $dump_data;
close $fh;
@@ -284,24 +311,34 @@ if (-d $log_path)
}
# Second dump from the upgraded instance.
$newnode->command_ok(
[
'pg_dumpall', '--no-sync',
'-d', $newnode->connstr('postgres'),
'-f', "$tempdir/dump2.sql"
],
'dump after running pg_upgrade');
@dump_command = (
'pg_dumpall', '--no-sync', '-d', $newnode->connstr('postgres'),
'-f', $dump2_file);
# --extra-float-digits is needed when upgrading from a version older than v12.
push(@dump_command, '--extra-float-digits', '0')
if ($oldnode->pg_version < 12);
$newnode->command_ok(\@dump_command, 'dump after running pg_upgrade');
# No need to apply filters on the dumps if working on the same version
# for the old and new nodes.
my $dump1_filtered = $dump1_file;
my $dump2_filtered = $dump2_file;
if ($oldnode->pg_version != $newnode->pg_version)
{
$dump1_filtered = filter_dump($oldnode, $dump1_file);
$dump2_filtered = filter_dump($newnode, $dump2_file);
}
# Compare the two dumps, there should be no differences.
my $compare_res = compare("$tempdir/dump1.sql", "$tempdir/dump2.sql");
my $compare_res = compare($dump1_filtered, $dump2_filtered);
is($compare_res, 0, 'old and new dumps match after pg_upgrade');
# Provide more context if the dumps do not match.
if ($compare_res != 0)
{
my ($stdout, $stderr) =
run_command([ 'diff', "$tempdir/dump1.sql", "$tempdir/dump2.sql" ]);
print "=== diff of $tempdir/dump1.sql and $tempdir/dump2.sql\n";
run_command([ 'diff', $dump1_filtered, $dump2_filtered ]);
print "=== diff of $dump1_filtered and $dump2_filtered\n";
print "=== stdout ===\n";
print $stdout;
print "=== stderr ===\n";