Initial pgperltidy run for v12.

Make all the perl code look nice, too (for some value of "nice").
Tom Lane 2019-05-22 13:36:19 -04:00
parent 8255c7a5ee
commit db6e2b4c52
38 changed files with 487 additions and 384 deletions
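For context, pgperltidy is the small wrapper kept with the other indent tooling in src/tools/pgindent/; it runs perltidy over the tree's Perl files with the settings in src/tools/pgindent/perltidyrc, which is what produced every hunk below (whitespace and layout only, no behavioral change). A minimal sketch of the same operation on a single file through the Perl::Tidy module, assuming Perl::Tidy is installed and the working directory is the top of a PostgreSQL checkout; the destination name here is a placeholder, since the wrapper itself rewrites files in place:

use strict;
use warnings;
use Perl::Tidy;

# Reformat one script with the PostgreSQL perltidy profile.
# The destination file name is illustrative only.
Perl::Tidy::perltidy(
    source      => 'src/backend/catalog/genbki.pl',
    destination => 'genbki.pl.tidied',
    perltidyrc  => 'src/tools/pgindent/perltidyrc',
);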


@@ -20,7 +20,7 @@ use Getopt::Long;
use File::Basename;
use File::Spec;
BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); }
use Catalog;
@@ -34,7 +34,7 @@ GetOptions(
'include-path:s' => \$include_path) || usage();
# Sanity check arguments.
die "No input files.\n" unless @ARGV;
die "No input files.\n" unless @ARGV;
die "--set-version must be specified.\n" unless $major_version;
die "Invalid version string: $major_version\n"
unless $major_version =~ /^\d+$/;
@@ -301,7 +301,7 @@ foreach my $row (@{ $catalog_data{pg_type} })
$typeoids{ $row->{typname} } = $row->{oid};
# for pg_attribute copies of pg_type values
$types{ $row->{typname} } = $row;
}
# Encoding identifier lookup. This uses the same replacement machinery
@@ -313,7 +313,7 @@ open(my $ef, '<', $encfile) || die "$encfile: $!";
# We're parsing an enum, so start with 0 and increment
# every time we find an enum member.
my $encid = 0;
my $collect_encodings = 0;
while (<$ef>)
{


@@ -203,7 +203,7 @@ $bmap{'t'} = 'true';
$bmap{'f'} = 'false';
my @fmgr_builtin_oid_index;
my $last_builtin_oid = 0;
my $fmgr_count = 0;
foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
{
print $tfh


@@ -60,14 +60,14 @@ mkdir $datadir;
}
# Control file should tell that data checksums are disabled by default.
command_like(['pg_controldata', $datadir],
qr/Data page checksum version:.*0/,
'checksums are disabled in control file');
command_like(
[ 'pg_controldata', $datadir ],
qr/Data page checksum version:.*0/,
'checksums are disabled in control file');
# pg_checksums fails with checksums disabled by default. This is
# not part of the tests included in pg_checksums to save from
# the creation of an extra instance.
command_fails(
[ 'pg_checksums', '-D', $datadir],
command_fails([ 'pg_checksums', '-D', $datadir ],
"pg_checksums fails with data checksum disabled");
command_ok([ 'initdb', '-S', $datadir ], 'sync only');


@@ -359,7 +359,7 @@ SKIP:
$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backupR", '-R' ],
'pg_basebackup -R runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
rmtree("$tempdir/backupR");


@@ -14,21 +14,22 @@ use Test::More tests => 62;
# at the end.
sub check_relation_corruption
{
my $node = shift;
my $table = shift;
my $tablespace = shift;
my $pgdata = $node->data_dir;
$node->safe_psql('postgres',
$node->safe_psql(
'postgres',
"SELECT a INTO $table FROM generate_series(1,10000) AS a;
ALTER TABLE $table SET (autovacuum_enabled=false);");
$node->safe_psql('postgres',
"ALTER TABLE ".$table." SET TABLESPACE ".$tablespace.";");
"ALTER TABLE " . $table . " SET TABLESPACE " . $tablespace . ";");
my $file_corrupted = $node->safe_psql('postgres',
"SELECT pg_relation_filepath('$table');");
my $relfilenode_corrupted = $node->safe_psql('postgres',
my $file_corrupted =
$node->safe_psql('postgres', "SELECT pg_relation_filepath('$table');");
my $relfilenode_corrupted = $node->safe_psql('postgres',
"SELECT relfilenode FROM pg_class WHERE relname = '$table';");
# Set page header and block size
@@ -38,9 +39,14 @@ sub check_relation_corruption
# Checksums are correct for single relfilenode as the table is not
# corrupted yet.
command_ok(['pg_checksums', '--check', '-D', $pgdata, '-r',
$relfilenode_corrupted],
"succeeds for single relfilenode on tablespace $tablespace with offline cluster");
command_ok(
[
'pg_checksums', '--check',
'-D', $pgdata,
'-r', $relfilenode_corrupted
],
"succeeds for single relfilenode on tablespace $tablespace with offline cluster"
);
# Time to create some corruption
open my $file, '+<', "$pgdata/$file_corrupted";
@@ -49,26 +55,32 @@ sub check_relation_corruption
close $file;
# Checksum checks on single relfilenode fail
$node->command_checks_all([ 'pg_checksums', '--check', '-D', $pgdata,
'-r', $relfilenode_corrupted],
1,
[qr/Bad checksums:.*1/],
[qr/checksum verification failed/],
"fails with corrupted data for single relfilenode on tablespace $tablespace");
$node->command_checks_all(
[
'pg_checksums', '--check',
'-D', $pgdata,
'-r', $relfilenode_corrupted
],
1,
[qr/Bad checksums:.*1/],
[qr/checksum verification failed/],
"fails with corrupted data for single relfilenode on tablespace $tablespace"
);
# Global checksum checks fail as well
$node->command_checks_all([ 'pg_checksums', '--check', '-D', $pgdata],
1,
[qr/Bad checksums:.*1/],
[qr/checksum verification failed/],
"fails with corrupted data on tablespace $tablespace");
$node->command_checks_all(
[ 'pg_checksums', '--check', '-D', $pgdata ],
1,
[qr/Bad checksums:.*1/],
[qr/checksum verification failed/],
"fails with corrupted data on tablespace $tablespace");
# Drop corrupted table again and make sure there is no more corruption.
$node->start;
$node->safe_psql('postgres', "DROP TABLE $table;");
$node->stop;
$node->command_ok(['pg_checksums', '--check', '-D', $pgdata],
"succeeds again after table drop on tablespace $tablespace");
$node->command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
"succeeds again after table drop on tablespace $tablespace");
$node->start;
return;
@@ -80,19 +92,20 @@ $node->init();
my $pgdata = $node->data_dir;
# Control file should know that checksums are disabled.
command_like(['pg_controldata', $pgdata],
qr/Data page checksum version:.*0/,
'checksums disabled in control file');
command_like(
[ 'pg_controldata', $pgdata ],
qr/Data page checksum version:.*0/,
'checksums disabled in control file');
# These are correct but empty files, so they should pass through.
append_to_file "$pgdata/global/99999", "";
append_to_file "$pgdata/global/99999.123", "";
append_to_file "$pgdata/global/99999_fsm", "";
append_to_file "$pgdata/global/99999_init", "";
append_to_file "$pgdata/global/99999_vm", "";
append_to_file "$pgdata/global/99999_init.123", "";
append_to_file "$pgdata/global/99999_fsm.123", "";
append_to_file "$pgdata/global/99999_vm.123", "";
# These are temporary files and folders with dummy contents, which
# should be ignored by the scan.
@@ -101,67 +114,75 @@ mkdir "$pgdata/global/pgsql_tmp";
append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo";
# Enable checksums.
command_ok(['pg_checksums', '--enable', '--no-sync', '-D', $pgdata],
"checksums successfully enabled in cluster");
command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
"checksums successfully enabled in cluster");
# Successive attempt to enable checksums fails.
command_fails(['pg_checksums', '--enable', '--no-sync', '-D', $pgdata],
"enabling checksums fails if already enabled");
command_fails([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
"enabling checksums fails if already enabled");
# Control file should know that checksums are enabled.
command_like(['pg_controldata', $pgdata],
qr/Data page checksum version:.*1/,
'checksums enabled in control file');
command_like(
[ 'pg_controldata', $pgdata ],
qr/Data page checksum version:.*1/,
'checksums enabled in control file');
# Disable checksums again. Flush result here as that should be cheap.
command_ok(['pg_checksums', '--disable', '-D', $pgdata],
"checksums successfully disabled in cluster");
command_ok(
[ 'pg_checksums', '--disable', '-D', $pgdata ],
"checksums successfully disabled in cluster");
# Successive attempt to disable checksums fails.
command_fails(['pg_checksums', '--disable', '--no-sync', '-D', $pgdata],
"disabling checksums fails if already disabled");
command_fails(
[ 'pg_checksums', '--disable', '--no-sync', '-D', $pgdata ],
"disabling checksums fails if already disabled");
# Control file should know that checksums are disabled.
command_like(['pg_controldata', $pgdata],
qr/Data page checksum version:.*0/,
'checksums disabled in control file');
command_like(
[ 'pg_controldata', $pgdata ],
qr/Data page checksum version:.*0/,
'checksums disabled in control file');
# Enable checksums again for follow-up tests.
command_ok(['pg_checksums', '--enable', '--no-sync', '-D', $pgdata],
"checksums successfully enabled in cluster");
command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ],
"checksums successfully enabled in cluster");
# Control file should know that checksums are enabled.
command_like(['pg_controldata', $pgdata],
qr/Data page checksum version:.*1/,
'checksums enabled in control file');
command_like(
[ 'pg_controldata', $pgdata ],
qr/Data page checksum version:.*1/,
'checksums enabled in control file');
# Checksums pass on a newly-created cluster
command_ok(['pg_checksums', '--check', '-D', $pgdata],
"succeeds with offline cluster");
command_ok([ 'pg_checksums', '--check', '-D', $pgdata ],
"succeeds with offline cluster");
# Checksums are verified if no other arguments are specified
command_ok(['pg_checksums', '-D', $pgdata],
"verifies checksums as default action");
command_ok(
[ 'pg_checksums', '-D', $pgdata ],
"verifies checksums as default action");
# Specific relation files cannot be requested when action is --disable
# or --enable.
command_fails(['pg_checksums', '--disable', '-r', '1234', '-D', $pgdata],
"fails when relfilenodes are requested and action is --disable");
command_fails(['pg_checksums', '--enable', '-r', '1234', '-D', $pgdata],
"fails when relfilenodes are requested and action is --enable");
command_fails(
[ 'pg_checksums', '--disable', '-r', '1234', '-D', $pgdata ],
"fails when relfilenodes are requested and action is --disable");
command_fails(
[ 'pg_checksums', '--enable', '-r', '1234', '-D', $pgdata ],
"fails when relfilenodes are requested and action is --enable");
# Checks cannot happen with an online cluster
$node->start;
command_fails(['pg_checksums', '--check', '-D', $pgdata],
"fails with online cluster");
command_fails([ 'pg_checksums', '--check', '-D', $pgdata ],
"fails with online cluster");
# Check corruption of table on default tablespace.
check_relation_corruption($node, 'corrupt1', 'pg_default');
# Create tablespace to check corruptions in a non-default tablespace.
my $basedir = $node->basedir;
my $tablespace_dir = "$basedir/ts_corrupt_dir";
mkdir ($tablespace_dir);
mkdir($tablespace_dir);
$tablespace_dir = TestLib::real_dir($tablespace_dir);
$node->safe_psql('postgres',
"CREATE TABLESPACE ts_corrupt LOCATION '$tablespace_dir';");
@@ -171,19 +192,20 @@ check_relation_corruption($node, 'corrupt2', 'ts_corrupt');
# correctly-named relation files filled with some corrupted data.
sub fail_corrupt
{
my $node = shift;
my $file = shift;
my $pgdata = $node->data_dir;
# Create the file with some dummy data in it.
my $file_name = "$pgdata/global/$file";
append_to_file $file_name, "foo";
$node->command_checks_all([ 'pg_checksums', '--check', '-D', $pgdata],
1,
[qr/^$/],
[qr/could not read block 0 in file.*$file\":/],
"fails for corrupted data in $file");
$node->command_checks_all(
[ 'pg_checksums', '--check', '-D', $pgdata ],
1,
[qr/^$/],
[qr/could not read block 0 in file.*$file\":/],
"fails for corrupted data in $file");
# Remove file to prevent future lookup errors on conflicts.
unlink $file_name;


@@ -26,6 +26,7 @@ open my $conf, '>>', "$tempdir/data/postgresql.conf";
print $conf "fsync = off\n";
print $conf TestLib::slurp_file($ENV{TEMP_CONFIG})
if defined $ENV{TEMP_CONFIG};
if (!$windows_os)
{
print $conf "listen_addresses = ''\n";


@@ -25,8 +25,10 @@ my $current_logfiles = slurp_file($node->data_dir . '/current_logfiles');
note "current_logfiles = $current_logfiles";
like($current_logfiles, qr|^stderr log/postgresql-.*log$|,
'current_logfiles is sane');
like(
$current_logfiles,
qr|^stderr log/postgresql-.*log$|,
'current_logfiles is sane');
my $lfname = $current_logfiles;
$lfname =~ s/^stderr //;
@@ -43,8 +45,7 @@ for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
usleep(100_000);
}
like($first_logfile, qr/division by zero/,
'found expected log file content');
like($first_logfile, qr/division by zero/, 'found expected log file content');
# Sleep 2 seconds and ask for log rotation; this should result in
# output into a different log file name.
@@ -63,8 +64,10 @@ for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
note "now current_logfiles = $new_current_logfiles";
like($new_current_logfiles, qr|^stderr log/postgresql-.*log$|,
'new current_logfiles is sane');
like(
$new_current_logfiles,
qr|^stderr log/postgresql-.*log$|,
'new current_logfiles is sane');
$lfname = $new_current_logfiles;
$lfname =~ s/^stderr //;
@@ -82,7 +85,9 @@ for (my $attempts = 0; $attempts < $max_attempts; $attempts++)
usleep(100_000);
}
like($second_logfile, qr/syntax error/,
'found expected log file content in new log file');
like(
$second_logfile,
qr/syntax error/,
'found expected log file content in new log file');
$node->stop();


@@ -50,10 +50,9 @@ command_fails_like(
);
command_fails_like(
[ 'pg_restore' ],
['pg_restore'],
qr{\Qpg_restore: error: one of -d/--dbname and -f/--file must be specified\E},
'pg_restore: error: one of -d/--dbname and -f/--file must be specified'
);
'pg_restore: error: one of -d/--dbname and -f/--file must be specified');
command_fails_like(
[ 'pg_restore', '-s', '-a', '-f -' ],
@@ -125,7 +124,8 @@ command_fails_like(
command_fails_like(
[ 'pg_dump', '--on-conflict-do-nothing' ],
qr/pg_dump: error: option --on-conflict-do-nothing requires option --inserts, --rows-per-insert or --column-inserts/,
'pg_dump: --on-conflict-do-nothing requires --inserts, --rows-per-insert, --column-inserts');
'pg_dump: --on-conflict-do-nothing requires --inserts, --rows-per-insert, --column-inserts'
);
# pg_dumpall command-line argument checks
command_fails_like(
@@ -161,4 +161,5 @@ command_fails_like(
command_fails_like(
[ 'pg_dumpall', '--exclude-database=foo', '--globals-only' ],
qr/\Qpg_dumpall: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only');
'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only'
);


@@ -810,7 +810,8 @@ my %tests = (
},
'ALTER TABLE test_second_table OWNER TO' => {
regexp => qr/^\QALTER TABLE dump_test.test_second_table OWNER TO \E.+;/m,
regexp =>
qr/^\QALTER TABLE dump_test.test_second_table OWNER TO \E.+;/m,
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
@@ -2427,7 +2428,7 @@ my %tests = (
\QALTER INDEX dump_test.index_with_stats ALTER COLUMN 3 SET STATISTICS 500;\E\n
/xms,
like =>
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => { exclude_dump_test_schema => 1, },
},
@@ -2900,12 +2901,12 @@ my %tests = (
data_only => 1,
section_pre_data => 1,
test_schema_plus_blobs => 1,
binary_upgrade => 1,
},
unlike => {
no_blobs => 1,
no_privs => 1,
schema_only => 1,
},
},
@@ -3116,13 +3117,13 @@ my %tests = (
'CREATE ACCESS METHOD regress_test_table_am' => {
create_order => 11,
create_sql => 'CREATE ACCESS METHOD regress_table_am TYPE TABLE HANDLER heap_tableam_handler;',
create_sql =>
'CREATE ACCESS METHOD regress_table_am TYPE TABLE HANDLER heap_tableam_handler;',
regexp => qr/^
\QCREATE ACCESS METHOD regress_table_am TYPE TABLE HANDLER heap_tableam_handler;\E
\n/xm,
like => {
%full_runs,
section_pre_data => 1,
%full_runs, section_pre_data => 1,
},
},
@@ -3134,7 +3135,7 @@ my %tests = (
# pretty, but seems hard to do better in this framework.
'CREATE TABLE regress_pg_dump_table_am' => {
create_order => 12,
create_sql => '
CREATE TABLE dump_test.regress_pg_dump_table_am_0() USING heap;
CREATE TABLE dump_test.regress_pg_dump_table_am_1 (col1 int) USING regress_table_am;
CREATE TABLE dump_test.regress_pg_dump_table_am_2() USING heap;',
@@ -3145,16 +3146,14 @@ my %tests = (
\n\s+\Qcol1 integer\E
\n\);/xm,
like => {
%full_runs,
%dump_test_schema_runs,
section_pre_data => 1,
%full_runs, %dump_test_schema_runs, section_pre_data => 1,
},
unlike => { exclude_dump_test_schema => 1},
unlike => { exclude_dump_test_schema => 1 },
},
'CREATE MATERIALIZED VIEW regress_pg_dump_matview_am' => {
create_order => 13,
create_sql => '
CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_0 USING heap AS SELECT 1;
CREATE MATERIALIZED VIEW dump_test.regress_pg_dump_matview_am_1
USING regress_table_am AS SELECT count(*) FROM pg_class;
@@ -3167,13 +3166,10 @@ my %tests = (
\n\s+\QFROM pg_class\E
\n\s+\QWITH NO DATA;\E\n/xm,
like => {
%full_runs,
%dump_test_schema_runs,
section_pre_data => 1,
%full_runs, %dump_test_schema_runs, section_pre_data => 1,
},
unlike => { exclude_dump_test_schema => 1},
}
);
unlike => { exclude_dump_test_schema => 1 },
});
#########################################
# Create a PG instance to test actually dumping from
@@ -3330,8 +3326,7 @@ foreach my $db (sort keys %create_sql)
command_fails_like(
[ 'pg_dump', '-p', "$port", 'qqq' ],
qr/\Qpg_dump: error: connection to database "qqq" failed: FATAL: database "qqq" does not exist\E/,
'connecting to a non-existent database'
);
'connecting to a non-existent database');
#########################################
# Test connecting with an unprivileged user


@@ -25,17 +25,17 @@ sub run_test
# replicated to the standby.
master_psql('CREATE DATABASE beforepromotion');
master_psql('CREATE TABLE beforepromotion_tab (a int)',
'beforepromotion');
RewindTest::promote_standby();
# Create databases in the old master and the new promoted standby.
master_psql('CREATE DATABASE master_afterpromotion');
master_psql('CREATE TABLE master_promotion_tab (a int)',
'master_afterpromotion');
standby_psql('CREATE DATABASE standby_afterpromotion');
standby_psql('CREATE TABLE standby_promotion_tab (a int)',
'standby_afterpromotion');
# The clusters are now diverged.


@@ -133,8 +133,10 @@ sub setup_cluster
# Set up pg_hba.conf and pg_ident.conf for the role running
# pg_rewind. This role is used for all the tests, and has
# minimal permissions enough to rewind from an online source.
$node_master->init(allows_streaming => 1, extra => $extra,
auth_extra => ['--create-role', 'rewind_user']);
$node_master->init(
allows_streaming => 1,
extra => $extra,
auth_extra => [ '--create-role', 'rewind_user' ]);
# Set wal_keep_segments to prevent WAL segment recycling after enforced
# checkpoints in the tests.
@@ -151,7 +153,8 @@ sub start_master
# Create custom role which is used to run pg_rewind, and adjust its
# permissions to the minimum necessary.
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
CREATE ROLE rewind_user LOGIN;
GRANT EXECUTE ON function pg_catalog.pg_ls_dir(text, boolean, boolean)
TO rewind_user;
@@ -265,10 +268,9 @@ sub run_pg_rewind
# Do rewind using a remote connection as source
command_ok(
[
'pg_rewind', "--debug",
"--source-server", $standby_connstr,
"--target-pgdata=$master_pgdata",
"--no-sync"
'pg_rewind', "--debug",
"--source-server", $standby_connstr,
"--target-pgdata=$master_pgdata", "--no-sync"
],
'pg_rewind remote');
}


@@ -63,7 +63,7 @@ sub pgbench
# makes a 5-MiB table.
$node->safe_psql('postgres',
'CREATE UNLOGGED TABLE insert_tbl (id serial primary key); ');
pgbench(
'--no-vacuum --client=5 --protocol=prepared --transactions=25',
@@ -286,7 +286,7 @@ pgbench(
qr{command=15.: double 15\b},
qr{command=16.: double 16\b},
qr{command=17.: double 17\b},
qr{command=20.: int 1\b}, # zipfian random
qr{command=21.: double -27\b},
qr{command=22.: double 1024\b},
qr{command=23.: double 1\b},
@@ -326,9 +326,9 @@ pgbench(
qr{command=86.: int 86\b},
qr{command=93.: int 93\b},
qr{command=95.: int 0\b},
qr{command=96.: int 1\b}, # :scale
qr{command=97.: int 0\b}, # :client_id
qr{command=98.: int 5432\b}, # :random_seed
qr{command=99.: int -9223372036854775808\b}, # min int
qr{command=100.: int 9223372036854775807\b}, # max int
],
@@ -542,14 +542,17 @@ pgbench(
pgbench(
'-t 1', 0,
[ qr{type: .*/001_pgbench_gset}, qr{processed: 1/1} ],
[ qr{command=3.: int 0\b},
[
qr{command=3.: int 0\b},
qr{command=5.: int 1\b},
qr{command=6.: int 2\b},
qr{command=8.: int 3\b},
qr{command=10.: int 4\b},
qr{command=12.: int 5\b} ],
qr{command=12.: int 5\b}
],
'pgbench gset command',
{ '001_pgbench_gset' => q{-- test gset
{
'001_pgbench_gset' => q{-- test gset
-- no columns
SELECT \gset
-- one value
@@ -568,7 +571,8 @@ SELECT 0 AS i4, 4 AS i4 \gset
-- work on the last SQL command under \;
\; \; SELECT 0 AS i5 \; SELECT 5 AS i5 \; \; \gset
\set i debug(:i5)
} });
}
});
# trigger many expression errors
my @errors = (
@@ -587,10 +591,11 @@ my @errors = (
}
],
[
'sql too many args', 1, [qr{statement has too many arguments.*\b255\b}],
'sql too many args', 1,
[qr{statement has too many arguments.*\b255\b}],
q{-- MAX_ARGS=256 for prepared
\set i 0
SELECT LEAST(}.join(', ', (':i') x 256).q{)}
SELECT LEAST(} . join(', ', (':i') x 256) . q{)}
],
# SHELL
@@ -609,7 +614,7 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
[
'shell too many args', 1, [qr{too many arguments in command "shell"}],
q{-- 256 arguments to \shell
\shell echo }.join(' ', ('arg') x 255)
\shell echo } . join(' ', ('arg') x 255)
],
# SET
@@ -625,11 +630,9 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
'set invalid variable name', 2,
[qr{invalid variable name}], q{\set . 1}
],
[ 'set division by zero', 2, [qr{division by zero}], q{\set i 1/0} ],
[
'set division by zero', 2,
[qr{division by zero}], q{\set i 1/0}
],
[ 'set undefined variable',
'set undefined variable',
2,
[qr{undefined variable "nosuchvariable"}],
q{\set i :nosuchvariable}
@@ -646,10 +649,8 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
[qr{empty range given to random}], q{\set i random(5,3)}
],
[
'set random range too large',
2,
[qr{random range is too large}],
q{\set i random(:minint, :maxint)}
'set random range too large', 2,
[qr{random range is too large}], q{\set i random(:minint, :maxint)}
],
[
'set gaussian param too small',
@@ -713,16 +714,26 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
],
# SET: ARITHMETIC OVERFLOW DETECTION
[ 'set double to int overflow', 2,
[ qr{double to int overflow for 100} ], q{\set i int(1E32)} ],
[ 'set bigint add overflow', 2,
[ qr{int add out} ], q{\set i (1<<62) + (1<<62)} ],
[ 'set bigint sub overflow', 2,
[ qr{int sub out} ], q{\set i 0 - (1<<62) - (1<<62) - (1<<62)} ],
[ 'set bigint mul overflow', 2,
[ qr{int mul out} ], q{\set i 2 * (1<<62)} ],
[ 'set bigint div out of range', 2,
[ qr{bigint div out of range} ], q{\set i :minint / -1} ],
[
'set double to int overflow', 2,
[qr{double to int overflow for 100}], q{\set i int(1E32)}
],
[
'set bigint add overflow', 2,
[qr{int add out}], q{\set i (1<<62) + (1<<62)}
],
[
'set bigint sub overflow',
2, [qr{int sub out}], q{\set i 0 - (1<<62) - (1<<62) - (1<<62)}
],
[
'set bigint mul overflow', 2,
[qr{int mul out}], q{\set i 2 * (1<<62)}
],
[
'set bigint div out of range', 2,
[qr{bigint div out of range}], q{\set i :minint / -1}
],
# SETSHELL
[
@@ -759,31 +770,47 @@ SELECT LEAST(}.join(', ', (':i') x 256).q{)}
[qr{invalid command .* "nosuchcommand"}], q{\nosuchcommand}
],
[ 'misc empty script', 1, [qr{empty command list for script}], q{} ],
[ 'bad boolean', 2,
[qr{malformed variable.*trueXXX}], q{\set b :badtrue or true} ],
[
'bad boolean', 2,
[qr{malformed variable.*trueXXX}], q{\set b :badtrue or true}
],
# GSET
[ 'gset no row', 2,
[qr{expected one row, got 0\b}], q{SELECT WHERE FALSE \gset} ],
[
'gset no row', 2,
[qr{expected one row, got 0\b}], q{SELECT WHERE FALSE \gset}
],
[ 'gset alone', 1, [qr{gset must follow a SQL command}], q{\gset} ],
[ 'gset no SQL', 1,
[
'gset no SQL', 1,
[qr{gset must follow a SQL command}], q{\set i +1
\gset} ],
[ 'gset too many arguments', 1,
[qr{too many arguments}], q{SELECT 1 \gset a b} ],
[ 'gset after gset', 1,
[qr{gset must follow a SQL command}], q{SELECT 1 AS i \gset
\gset} ],
[ 'gset non SELECT', 2,
\gset}
],
[
'gset too many arguments', 1,
[qr{too many arguments}], q{SELECT 1 \gset a b}
],
[
'gset after gset', 1,
[qr{gset must follow a SQL command}], q{SELECT 1 AS i \gset
\gset}
],
[
'gset non SELECT',
2,
[qr{expected one row, got 0}],
q{DROP TABLE IF EXISTS no_such_table \gset} ],
[ 'gset bad default name', 2,
[qr{error storing into variable \?column\?}],
q{SELECT 1 \gset} ],
[ 'gset bad name', 2,
q{DROP TABLE IF EXISTS no_such_table \gset}
],
[
'gset bad default name', 2,
[qr{error storing into variable \?column\?}], q{SELECT 1 \gset}
],
[
'gset bad name',
2,
[qr{error storing into variable bad name!}],
q{SELECT 1 AS "bad name!" \gset} ],
);
q{SELECT 1 AS "bad name!" \gset}
],);
for my $e (@errors)
{
@@ -792,9 +819,9 @@ for my $e (@errors)
my $n = '001_pgbench_error_' . $name;
$n =~ s/ /_/g;
pgbench(
'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX' .
' -Dmaxint=9223372036854775807 -Dminint=-9223372036854775808' .
($no_prepare ? '' : ' -M prepared'),
'-n -t 1 -Dfoo=bla -Dnull=null -Dtrue=true -Done=1 -Dzero=0.0 -Dbadtrue=trueXXX'
. ' -Dmaxint=9223372036854775807 -Dminint=-9223372036854775808'
. ($no_prepare ? '' : ' -M prepared'),
$status,
[ $status == 1 ? qr{^$} : qr{processed: 0/1} ],
$re,
@@ -869,12 +896,9 @@ my $bdir = $node->basedir;
# with sampling rate
pgbench(
"-n -S -t 50 -c 2 --log --sampling-rate=0.5",
0,
[ qr{select only}, qr{processed: 100/100} ],
[ qr{^$} ],
'pgbench logs',
undef,
"-n -S -t 50 -c 2 --log --sampling-rate=0.5", 0,
[ qr{select only}, qr{processed: 100/100} ], [qr{^$}],
'pgbench logs', undef,
"--log-prefix=$bdir/001_pgbench_log_2");
check_pgbench_logs($bdir, '001_pgbench_log_2', 1, 8, 92,
@@ -882,8 +906,8 @@ check_pgbench_logs($bdir, '001_pgbench_log_2', 1, 8, 92,
# check log file in some detail
pgbench(
"-n -b se -t 10 -l",
0, [ qr{select only}, qr{processed: 10/10} ], [ qr{^$} ],
"-n -b se -t 10 -l", 0,
[ qr{select only}, qr{processed: 10/10} ], [qr{^$}],
'pgbench logs contents', undef,
"--log-prefix=$bdir/001_pgbench_log_3");


@@ -61,8 +61,7 @@ $node->issues_sql_like(
[ 'reindexdb', '--concurrently', '-S', 'public', 'postgres' ],
qr/statement: REINDEX SCHEMA CONCURRENTLY public;/,
'reindex specific schema concurrently');
$node->command_fails(
[ 'reindexdb', '--concurrently', '-s', 'postgres' ],
$node->command_fails([ 'reindexdb', '--concurrently', '-s', 'postgres' ],
'reindex system tables concurrently');
$node->issues_sql_like(
[ 'reindexdb', '-v', '-t', 'test1', 'postgres' ],

View File

@@ -96,16 +96,16 @@ $node->command_checks_all(
[qr/^WARNING.*cannot vacuum non-tables or special system tables/s],
'vacuumdb with view');
$node->command_fails(
[ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '0',
'postgres'],
[ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '0', 'postgres' ],
'vacuumdb --min-mxid-age with incorrect value');
$node->command_fails(
[ 'vacuumdb', '--table', 'vactable', '--min-xid-age', '0',
'postgres'],
[ 'vacuumdb', '--table', 'vactable', '--min-xid-age', '0', 'postgres' ],
'vacuumdb --min-xid-age with incorrect value');
$node->issues_sql_like(
[ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '2147483000',
'postgres'],
[
'vacuumdb', '--table', 'vactable', '--min-mxid-age',
'2147483000', 'postgres'
],
qr/GREATEST.*relminmxid.*2147483000/,
'vacuumdb --table --min-mxid-age');
$node->issues_sql_like(


@@ -34,8 +34,7 @@ my $oids = Catalog::FindAllOidsFromHeaders(@input_files);
# Also push FirstGenbkiObjectId to serve as a terminator for the last gap.
my $FirstGenbkiObjectId =
Catalog::FindDefinedSymbol('access/transam.h', '..',
'FirstGenbkiObjectId');
Catalog::FindDefinedSymbol('access/transam.h', '..', 'FirstGenbkiObjectId');
push @{$oids}, $FirstGenbkiObjectId;
my $prev_oid = 0;


@@ -39,11 +39,11 @@ my %replace_line = (
'ExecuteStmtEXECUTEnameexecute_param_clause' =>
'EXECUTE prepared_name execute_param_clause execute_rest',
'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data' =>
'CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest' ,
'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data'
=> 'CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
'ExecuteStmtCREATEOptTempTABLEIF_PNOTEXISTScreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data' =>
'CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest' ,
'ExecuteStmtCREATEOptTempTABLEIF_PNOTEXISTScreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data'
=> 'CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
'PrepareStmtPREPAREnameprep_type_clauseASPreparableStmt' =>
'PREPARE prepared_name prep_type_clause AS PreparableStmt');


@@ -103,10 +103,10 @@ my %replace_line = (
'RETURNING target_list opt_ecpg_into',
'ExecuteStmtEXECUTEnameexecute_param_clause' =>
'EXECUTE prepared_name execute_param_clause execute_rest',
'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data' =>
'CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
'ExecuteStmtCREATEOptTempTABLEIF_PNOTEXISTScreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data' =>
'CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
'ExecuteStmtCREATEOptTempTABLEcreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data'
=> 'CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
'ExecuteStmtCREATEOptTempTABLEIF_PNOTEXISTScreate_as_targetASEXECUTEnameexecute_param_clauseopt_with_data'
=> 'CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest',
'PrepareStmtPREPAREnameprep_type_clauseASPreparableStmt' =>
'PREPARE prepared_name prep_type_clause AS PreparableStmt',
'var_nameColId' => 'ECPGColId');


@@ -85,8 +85,9 @@ $node_master->restart;
# Move commit timestamps across page boundaries. Things should still
# be able to work across restarts with those transactions committed while
# track_commit_timestamp is disabled.
$node_master->safe_psql('postgres',
qq(CREATE PROCEDURE consume_xid(cnt int)
$node_master->safe_psql(
'postgres',
qq(CREATE PROCEDURE consume_xid(cnt int)
AS \$\$
DECLARE
i int;


@@ -288,7 +288,7 @@ sub check_mode_recursive
unless (defined($file_stat))
{
my $is_ENOENT = $!{ENOENT};
my $msg = "unable to stat $File::Find::name: $!";
if ($is_ENOENT)
{
warn $msg;


@@ -9,8 +9,9 @@ use Test::More tests => 32;
my $node_master = get_new_node('master');
# A specific role is created to perform some tests related to replication,
# and it needs proper authentication configuration.
$node_master->init(allows_streaming => 1,
auth_extra => ['--create-role', 'repl_role']);
$node_master->init(
allows_streaming => 1,
auth_extra => [ '--create-role', 'repl_role' ]);
$node_master->start;
my $backup_name = 'my_backup';
@@ -124,50 +125,59 @@ test_target_session_attrs($node_standby_1, $node_master, $node_standby_1,
# role.
note "testing SHOW commands for replication connection";
$node_master->psql('postgres',"
$node_master->psql(
'postgres', "
CREATE ROLE repl_role REPLICATION LOGIN;
GRANT pg_read_all_settings TO repl_role;");
my $master_host = $node_master->host;
my $master_port = $node_master->port;
my $connstr_common = "host=$master_host port=$master_port user=repl_role";
my $connstr_rep = "$connstr_common replication=1";
my $connstr_db = "$connstr_common replication=database dbname=postgres";
# Test SHOW ALL
my ($ret, $stdout, $stderr) =
$node_master->psql('postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
my ($ret, $stdout, $stderr) = $node_master->psql(
'postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok($ret == 0, "SHOW ALL with replication role and physical replication");
($ret, $stdout, $stderr) =
$node_master->psql('postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
($ret, $stdout, $stderr) = $node_master->psql(
'postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
ok($ret == 0, "SHOW ALL with replication role and logical replication");
# Test SHOW with a user-settable parameter
($ret, $stdout, $stderr) =
$node_master->psql('postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok($ret == 0, "SHOW with user-settable parameter, replication role and physical replication");
($ret, $stdout, $stderr) =
$node_master->psql('postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
ok($ret == 0, "SHOW with user-settable parameter, replication role and logical replication");
($ret, $stdout, $stderr) = $node_master->psql(
'postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok( $ret == 0,
"SHOW with user-settable parameter, replication role and physical replication"
);
($ret, $stdout, $stderr) = $node_master->psql(
'postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
ok( $ret == 0,
"SHOW with user-settable parameter, replication role and logical replication"
);
# Test SHOW with a superuser-settable parameter
($ret, $stdout, $stderr) =
$node_master->psql('postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok($ret == 0, "SHOW with superuser-settable parameter, replication role and physical replication");
($ret, $stdout, $stderr) =
$node_master->psql('postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
ok($ret == 0, "SHOW with superuser-settable parameter, replication role and logical replication");
($ret, $stdout, $stderr) = $node_master->psql(
'postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok( $ret == 0,
"SHOW with superuser-settable parameter, replication role and physical replication"
);
($ret, $stdout, $stderr) = $node_master->psql(
'postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
ok( $ret == 0,
"SHOW with superuser-settable parameter, replication role and logical replication"
);
note "switching to physical replication slot";


@@ -122,21 +122,26 @@ test_recovery_standby('LSN', 'standby_5', $node_master, \@recovery_params,
# different one is allowed.
@recovery_params = (
"recovery_target_name = '$recovery_name'",
"recovery_target_name = ''",
"recovery_target_time = '$recovery_time'");
"recovery_target_name = '$recovery_name'",
"recovery_target_name = ''",
"recovery_target_time = '$recovery_time'");
test_recovery_standby('multiple overriding settings',
'standby_6', $node_master, \@recovery_params, "3000", $lsn3);
my $node_standby = get_new_node('standby_7');
$node_standby->init_from_backup($node_master, 'my_backup', has_restoring => 1);
$node_standby->append_conf('postgresql.conf', "recovery_target_name = '$recovery_name'
$node_standby->init_from_backup($node_master, 'my_backup',
has_restoring => 1);
$node_standby->append_conf(
'postgresql.conf', "recovery_target_name = '$recovery_name'
recovery_target_time = '$recovery_time'");
my $res = run_log(['pg_ctl', '-D', $node_standby->data_dir,
'-l', $node_standby->logfile, 'start']);
ok(! $res, 'invalid recovery startup fails');
my $res = run_log(
[
'pg_ctl', '-D', $node_standby->data_dir, '-l',
$node_standby->logfile, 'start'
]);
ok(!$res, 'invalid recovery startup fails');
my $logfile = slurp_file($node_standby->logfile());
ok ($logfile =~ qr/multiple recovery targets specified/,
ok($logfile =~ qr/multiple recovery targets specified/,
'multiple conflicting settings');


@@ -42,7 +42,9 @@ $node_master->teardown_node;
# promote standby 1 using "pg_promote", switching it to a new timeline
my $psql_out = '';
$node_standby_1->psql('postgres', "SELECT pg_promote(wait_seconds => 300)",
$node_standby_1->psql(
'postgres',
"SELECT pg_promote(wait_seconds => 300)",
stdout => \$psql_out);
is($psql_out, 't', "promotion of standby with pg_promote");


@@ -196,8 +196,10 @@ $killme_stdin .= q[
SELECT 1;
];
ok( pump_until(
$killme, \$killme_stderr,
qr/server closed the connection unexpectedly|connection to server was lost/m),
$killme,
\$killme_stderr,
qr/server closed the connection unexpectedly|connection to server was lost/m
),
"psql query died successfully after SIGKILL");
$killme->finish;


@@ -32,7 +32,8 @@ $bravo->start;
# Dummy table for the upcoming tests.
$alpha->safe_psql('postgres', 'create table test1 (a int)');
$alpha->safe_psql('postgres', 'insert into test1 select generate_series(1, 10000)');
$alpha->safe_psql('postgres',
'insert into test1 select generate_series(1, 10000)');
# take a checkpoint
$alpha->safe_psql('postgres', 'checkpoint');
@@ -41,8 +42,7 @@ $alpha->safe_psql('postgres', 'checkpoint');
# problematic WAL records.
$alpha->safe_psql('postgres', 'vacuum verbose test1');
# Wait for last record to have been replayed on the standby.
$alpha->wait_for_catchup($bravo, 'replay',
$alpha->lsn('insert'));
$alpha->wait_for_catchup($bravo, 'replay', $alpha->lsn('insert'));
# Now force a checkpoint on the standby. This seems unnecessary but for "some"
# reason, the previous checkpoint on the primary does not reflect on the standby
@@ -53,12 +53,12 @@ $bravo->safe_psql('postgres', 'checkpoint');
# Now just use a dummy table and run some operations to move minRecoveryPoint
# beyond the previous vacuum.
$alpha->safe_psql('postgres', 'create table test2 (a int, b text)');
$alpha->safe_psql('postgres', 'insert into test2 select generate_series(1,10000), md5(random()::text)');
$alpha->safe_psql('postgres',
'insert into test2 select generate_series(1,10000), md5(random()::text)');
$alpha->safe_psql('postgres', 'truncate test2');
# Wait again for all records to be replayed.
$alpha->wait_for_catchup($bravo, 'replay',
$alpha->lsn('insert'));
$alpha->wait_for_catchup($bravo, 'replay', $alpha->lsn('insert'));
# Do the promotion, which reinitializes minRecoveryPoint in the control
# file so as WAL is replayed up to the end.
@@ -69,7 +69,8 @@ $bravo->promote;
# has not happened yet.
$bravo->safe_psql('postgres', 'truncate test1');
$bravo->safe_psql('postgres', 'vacuum verbose test1');
$bravo->safe_psql('postgres', 'insert into test1 select generate_series(1,1000)');
$bravo->safe_psql('postgres',
'insert into test1 select generate_series(1,1000)');
# Now crash-stop the promoted standby and restart. This makes sure that
# replay does not see invalid page references because of an invalid
@@ -80,8 +81,5 @@ $bravo->start;
# Check state of the table after full crash recovery. All its data should
# be here.
my $psql_out;
$bravo->psql(
'postgres',
"SELECT count(*) FROM test1",
stdout => \$psql_out);
$bravo->psql('postgres', "SELECT count(*) FROM test1", stdout => \$psql_out);
is($psql_out, '1000', "Check that table state is correct");


@@ -17,20 +17,20 @@ use Test::More tests => 1;
sub find_largest_lsn
{
my $blocksize = int(shift);
my $filename = shift;
my ($max_hi,$max_lo) = (0,0);
my $filename = shift;
my ($max_hi, $max_lo) = (0, 0);
open(my $fh, "<:raw", $filename)
or die "failed to open $filename: $!";
my ($buf,$len);
my ($buf, $len);
while ($len = read($fh, $buf, $blocksize))
{
$len == $blocksize
or die "read only $len of $blocksize bytes from $filename";
my ($hi,$lo) = unpack("LL", $buf);
my ($hi, $lo) = unpack("LL", $buf);
if ($hi > $max_hi or ($hi == $max_hi and $lo > $max_lo))
{
($max_hi,$max_lo) = ($hi,$lo);
($max_hi, $max_lo) = ($hi, $lo);
}
}
defined($len) or die "read error on $filename: $!";
@@ -63,7 +63,8 @@ $standby->init_from_backup($primary, 'bkp', has_streaming => 1);
$standby->start;
# Create base table whose data consistency is checked.
$primary->safe_psql('postgres', "
$primary->safe_psql(
'postgres', "
CREATE TABLE test1 (a int) WITH (fillfactor = 10);
INSERT INTO test1 SELECT generate_series(1, 10000);");
@@ -74,8 +75,7 @@ $primary->safe_psql('postgres', 'CHECKPOINT;');
$primary->safe_psql('postgres', 'UPDATE test1 SET a = a + 1;');
# Wait for last record to have been replayed on the standby.
$primary->wait_for_catchup($standby, 'replay',
$primary->lsn('insert'));
$primary->wait_for_catchup($standby, 'replay', $primary->lsn('insert'));
# Fill in the standby's shared buffers with the data filled in
# previously.
@@ -96,8 +96,7 @@ my $relfilenode = $primary->safe_psql('postgres',
"SELECT pg_relation_filepath('test1'::regclass);");
# Wait for last record to have been replayed on the standby.
$primary->wait_for_catchup($standby, 'replay',
$primary->lsn('insert'));
$primary->wait_for_catchup($standby, 'replay', $primary->lsn('insert'));
# Issue a restart point on the standby now, which makes the checkpointer
# update minRecoveryPoint.
@@ -115,11 +114,11 @@ $standby->stop('fast');
# done by directly scanning the on-disk relation blocks and what
# pg_controldata lets know.
my $standby_data = $standby->data_dir;
my $offline_max_lsn = find_largest_lsn($blocksize,
"$standby_data/$relfilenode");
my $offline_max_lsn =
find_largest_lsn($blocksize, "$standby_data/$relfilenode");
# Fetch minRecoveryPoint from the control file itself
my ($stdout, $stderr) = run_command(['pg_controldata', $standby_data]);
my ($stdout, $stderr) = run_command([ 'pg_controldata', $standby_data ]);
my @control_data = split("\n", $stdout);
my $offline_recovery_lsn = undef;
foreach (@control_data)
@@ -131,9 +130,9 @@ foreach (@control_data)
}
}
die "No minRecoveryPoint in control file found\n"
unless defined($offline_recovery_lsn);
# minRecoveryPoint should never be older than the maximum LSN for all
# the pages on disk.
ok($offline_recovery_lsn ge $offline_max_lsn,
"Check offline that table data is consistent with minRecoveryPoint");
"Check offline that table data is consistent with minRecoveryPoint");


@@ -315,14 +315,18 @@ test_connect_fails(
"does not connect with client-side CRL");
# pg_stat_ssl
command_like([
'psql', '-X', '-A', '-F', ',', '-P', 'null=_null_',
'-d', "$common_connstr sslrootcert=invalid",
'-c', "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,compression,client_dn,client_serial,issuer_dn\n
command_like(
[
'psql', '-X',
'-A', '-F',
',', '-P',
'null=_null_', '-d',
"$common_connstr sslrootcert=invalid", '-c',
"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,compression,client_dn,client_serial,issuer_dn\n
^\d+,t,TLSv[\d.]+,[\w-]+,\d+,f,_null_,_null_,_null_$}mx,
'pg_stat_ssl view without client certificate');
### Server-side tests.
###
@@ -347,14 +351,23 @@ test_connect_ok(
"certificate authorization succeeds with correct client cert");
# pg_stat_ssl
command_like([
'psql', '-X', '-A', '-F', ',', '-P', 'null=_null_',
'-d', "$common_connstr user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
'-c', "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,compression,client_dn,client_serial,issuer_dn\n
command_like(
[
'psql',
'-X',
'-A',
'-F',
',',
'-P',
'null=_null_',
'-d',
"$common_connstr user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
'-c',
"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
qr{^pid,ssl,version,cipher,bits,compression,client_dn,client_serial,issuer_dn\n
^\d+,t,TLSv[\d.]+,[\w-]+,\d+,f,/CN=ssltestuser,1,\Q/CN=Test CA for PostgreSQL SSL regression test client certs\E$}mx,
'pg_stat_ssl with client certificate');
# client key with wrong permissions
test_connect_fails(
@@ -382,22 +395,28 @@ test_connect_fails(
# works, iff username matches Common Name
# fails, iff username doesn't match Common Name.
$common_connstr =
"sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=verifydb hostaddr=$SERVERHOSTADDR";
"sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=verifydb hostaddr=$SERVERHOSTADDR";
test_connect_ok($common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
"auth_option clientcert=verify-full succeeds with matching username and Common Name");
test_connect_ok(
$common_connstr,
"user=ssltestuser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
"auth_option clientcert=verify-full succeeds with matching username and Common Name"
);
test_connect_fails($common_connstr,
"user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
qr/FATAL/,
"auth_option clientcert=verify-full fails with mismatching username and Common Name");
test_connect_fails(
$common_connstr,
"user=anotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
qr/FATAL/,
"auth_option clientcert=verify-full fails with mismatching username and Common Name"
);
# Check that connecting with auth-optionverify-ca in pg_hba :
# works, when username doesn't match Common Name
test_connect_ok($common_connstr,
"user=yetanotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
"auth_option clientcert=verify-ca succeeds with mismatching username and Common Name");
test_connect_ok(
$common_connstr,
"user=yetanotheruser sslcert=ssl/client.crt sslkey=ssl/client_tmp.key",
"auth_option clientcert=verify-ca succeeds with mismatching username and Common Name"
);
# intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file
switch_server_cert($node, 'server-cn-only', 'root_ca');


@@ -47,7 +47,6 @@ $common_connstr =
"user=ssltestuser dbname=trustdb sslmode=require sslcert=invalid sslrootcert=invalid hostaddr=$SERVERHOSTADDR";
# Default settings
test_connect_ok($common_connstr, '',
"Basic SCRAM authentication with SSL");
test_connect_ok($common_connstr, '', "Basic SCRAM authentication with SSL");
done_testing($number_of_tests);


@@ -551,12 +551,14 @@ e|{e,d}
# Test a domain with a constraint backed by a SQL-language function,
# which needs an active snapshot in order to operate.
$node_publisher->safe_psql('postgres', "INSERT INTO tst_dom_constr VALUES (11)");
$node_publisher->safe_psql('postgres',
"INSERT INTO tst_dom_constr VALUES (11)");
$node_publisher->wait_for_catchup('tap_sub');
$result =
$node_subscriber->safe_psql('postgres', "SELECT sum(a) FROM tst_dom_constr");
$node_subscriber->safe_psql('postgres',
"SELECT sum(a) FROM tst_dom_constr");
is($result, '21', 'sql-function constraint on domain');
$node_subscriber->stop('fast');


@@ -18,15 +18,17 @@ $node_subscriber->start;
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED)");
"CREATE TABLE tab1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED)"
);
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 22) STORED)");
"CREATE TABLE tab1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 22) STORED)"
);
# data for initial sync
$node_publisher->safe_psql('postgres',
"INSERT INTO tab1 (a) VALUES (1), (2), (3)");
"INSERT INTO tab1 (a) VALUES (1), (2), (3)");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION pub1 FOR ALL TABLES");
@@ -40,25 +42,21 @@ my $synced_query =
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
my $result = $node_subscriber->safe_psql('postgres',
"SELECT a, b FROM tab1");
is($result, qq(1|22
my $result = $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab1");
is( $result, qq(1|22
2|44
3|66), 'generated columns initial sync');
# data to replicate
$node_publisher->safe_psql('postgres',
"INSERT INTO tab1 VALUES (4), (5)");
$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (4), (5)");
$node_publisher->safe_psql('postgres',
"UPDATE tab1 SET a = 6 WHERE a = 5");
$node_publisher->safe_psql('postgres', "UPDATE tab1 SET a = 6 WHERE a = 5");
$node_publisher->wait_for_catchup('sub1');
$result = $node_subscriber->safe_psql('postgres',
"SELECT a, b FROM tab1");
is($result, qq(1|22
$result = $node_subscriber->safe_psql('postgres', "SELECT a, b FROM tab1");
is( $result, qq(1|22
2|44
3|66
4|88


@@ -16,11 +16,15 @@ else
}
my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical', extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_publisher->init(
allows_streaming => 'logical',
extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_publisher->start;
my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical', extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_subscriber->init(
allows_streaming => 'logical',
extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_subscriber->start;
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@@ -36,7 +40,8 @@ my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
# full, since those have different code paths internally.
$node_subscriber->safe_psql('postgres',
q{CREATE COLLATION ctest_nondet (provider = icu, locale = 'und', deterministic = false)});
q{CREATE COLLATION ctest_nondet (provider = icu, locale = 'und', deterministic = false)}
);
# table with replica identity index
@@ -54,8 +59,7 @@ $node_subscriber->safe_psql('postgres',
# table with replica identity full
$node_publisher->safe_psql('postgres',
q{CREATE TABLE tab2 (a text, b text)});
$node_publisher->safe_psql('postgres', q{CREATE TABLE tab2 (a text, b text)});
$node_publisher->safe_psql('postgres',
q{ALTER TABLE tab2 REPLICA IDENTITY FULL});
@@ -76,7 +80,8 @@ $node_publisher->safe_psql('postgres',
q{CREATE PUBLICATION pub1 FOR ALL TABLES});
$node_subscriber->safe_psql('postgres',
qq{CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1 WITH (copy_data = false)});
qq{CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1 WITH (copy_data = false)}
);
$node_publisher->wait_for_catchup('sub1');
@@ -88,8 +93,7 @@ $node_publisher->safe_psql('postgres',
$node_publisher->wait_for_catchup('sub1');
is($node_subscriber->safe_psql('postgres', q{SELECT b FROM tab1}),
qq(bar),
'update with primary key with nondeterministic collation');
qq(bar), 'update with primary key with nondeterministic collation');
# test with replica identity full
@@ -99,5 +103,5 @@ $node_publisher->safe_psql('postgres',
$node_publisher->wait_for_catchup('sub1');
is($node_subscriber->safe_psql('postgres', q{SELECT b FROM tab2}),
qq(bar),
'update with replica identity full with nondeterministic collation');


@@ -30,7 +30,8 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE tab1 (a int PRIMARY KEY, b int)");
$node_publisher->safe_psql('postgres',
"CREATE FUNCTION double(x int) RETURNS int IMMUTABLE LANGUAGE SQL AS 'select x * 2'");
"CREATE FUNCTION double(x int) RETURNS int IMMUTABLE LANGUAGE SQL AS 'select x * 2'"
);
# an index with a predicate that lends itself to constant expressions
# evaluation
@@ -42,7 +43,8 @@ $node_subscriber->safe_psql('postgres',
"CREATE TABLE tab1 (a int PRIMARY KEY, b int)");
$node_subscriber->safe_psql('postgres',
"CREATE FUNCTION double(x int) RETURNS int IMMUTABLE LANGUAGE SQL AS 'select x * 2'");
"CREATE FUNCTION double(x int) RETURNS int IMMUTABLE LANGUAGE SQL AS 'select x * 2'"
);
$node_subscriber->safe_psql('postgres',
"CREATE INDEX ON tab1 (b) WHERE a > double(1)");
@@ -51,14 +53,14 @@ $node_publisher->safe_psql('postgres',
"CREATE PUBLICATION pub1 FOR ALL TABLES");
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1");
"CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1"
);
$node_publisher->wait_for_catchup('sub1');
# This would crash, first on the publisher, and then (if the publisher
# is fixed) on the subscriber.
$node_publisher->safe_psql('postgres',
"INSERT INTO tab1 VALUES (1, 2)");
$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1, 2)");
$node_publisher->wait_for_catchup('sub1');


@@ -38,9 +38,9 @@ use lib $FindBin::RealBin;
use PerfectHash;
my $output_path = '';
my $extern = 0;
my $case_fold = 1;
my $varname = 'ScanKeywords';
GetOptions(
'output:s' => \$output_path,
@@ -56,12 +56,13 @@ if ($output_path ne '' && substr($output_path, -1) ne '/')
$output_path .= '/';
}
$kw_input_file =~ /(\w+)\.h$/ || die "Input file must be named something.h.\n";
$kw_input_file =~ /(\w+)\.h$/
|| die "Input file must be named something.h.\n";
my $base_filename = $1 . '_d';
my $kw_def_file = $output_path . $base_filename . '.h';
open(my $kif, '<', $kw_input_file) || die "$kw_input_file: $!\n";
open(my $kwdef, '>', $kw_def_file) || die "$kw_def_file: $!\n";
# Opening boilerplate for keyword definition header.
printf $kwdef <<EOM, $base_filename, uc $base_filename, uc $base_filename;
@@ -116,10 +117,11 @@ if ($case_fold)
# helpful because it provides a cheap way to reject duplicate keywords.
# Also, insisting on sorted order ensures that code that scans the keyword
# table linearly will see the keywords in a canonical order.
for my $i (0..$#keywords - 1)
for my $i (0 .. $#keywords - 1)
{
die qq|The keyword "$keywords[$i + 1]" is out of order in $kw_input_file\n|
if ($keywords[$i] cmp $keywords[$i + 1]) >= 0;
die
qq|The keyword "$keywords[$i + 1]" is out of order in $kw_input_file\n|
if ($keywords[$i] cmp $keywords[ $i + 1 ]) >= 0;
}
# Emit the string containing all the keywords.
@@ -133,7 +135,7 @@ print $kwdef qq|";\n\n|;
printf $kwdef "static const uint16 %s_kw_offsets[] = {\n", $varname;
my $offset = 0;
my $offset = 0;
my $max_len = 0;
foreach my $name (@keywords)
{
@@ -169,11 +171,11 @@ printf $kwdef qq|static %s\n|, $f;
printf $kwdef "static " if !$extern;
printf $kwdef "const ScanKeywordList %s = {\n", $varname;
printf $kwdef qq|\t%s_kw_string,\n|, $varname;
printf $kwdef qq|\t%s_kw_offsets,\n|, $varname;
printf $kwdef qq|\t%s,\n|, $funcname;
printf $kwdef qq|\t%s_NUM_KEYWORDS,\n|, uc $varname;
printf $kwdef qq|\t%d\n|, $max_len;
printf $kwdef "};\n\n";
printf $kwdef "#endif\t\t\t\t\t\t\t/* %s_H */\n", uc $base_filename;


@@ -518,9 +518,12 @@ sub CopySubdirFiles
{
$flist = '';
if ($mf =~ /^HEADERS\s*=\s*(.*)$/m) { $flist .= $1 }
my @modlist = ();
my %fmodlist = ();
while ($mf =~ /^HEADERS_([^\s=]+)\s*=\s*(.*)$/mg) { $fmodlist{$1} .= $2 }
while ($mf =~ /^HEADERS_([^\s=]+)\s*=\s*(.*)$/mg)
{
$fmodlist{$1} .= $2;
}
if ($mf =~ /^MODULE_big\s*=\s*(.*)$/m)
{
@@ -544,14 +547,13 @@ sub CopySubdirFiles
croak "HEADERS_$mod for unknown module in $subdir $module"
unless grep { $_ eq $mod } @modlist;
$flist = ParseAndCleanRule($fmodlist{$mod}, $mf);
EnsureDirectories($target,
"include", "include/server",
"include/server/$moduledir",
"include/server/$moduledir/$mod");
EnsureDirectories($target, "include", "include/server",
"include/server/$moduledir",
"include/server/$moduledir/$mod");
foreach my $f (split /\s+/, $flist)
{
lcopy("$subdir/$module/$f",
"$target/include/server/$moduledir/$mod/" . basename($f))
"$target/include/server/$moduledir/$mod/" . basename($f))
|| croak("Could not copy file $f in $subdir $module");
print '.';
}
@@ -615,8 +617,7 @@ sub CopyIncludeFiles
'Public headers', $target . '/include/',
'src/include/', 'postgres_ext.h',
'pg_config.h', 'pg_config_ext.h',
'pg_config_os.h',
'pg_config_manual.h');
'pg_config_os.h', 'pg_config_manual.h');
lcopy('src/include/libpq/libpq-fs.h', $target . '/include/libpq/')
|| croak 'Could not copy libpq-fs.h';


@@ -269,7 +269,7 @@ sub GenerateFiles
"LIBPGTYPES");
chdir('src/backend/utils');
my $pg_proc_dat = '../../../src/include/catalog/pg_proc.dat';
if ( IsNewer('fmgr-stamp', 'Gen_fmgrtab.pl')
|| IsNewer('fmgr-stamp', '../catalog/Catalog.pm')
|| IsNewer('fmgr-stamp', $pg_proc_dat)
@@ -409,12 +409,12 @@ sub GenerateFiles
chdir('../../..');
}
if (IsNewer(
'src/common/kwlist_d.h',
'src/include/parser/kwlist.h'))
if (IsNewer('src/common/kwlist_d.h', 'src/include/parser/kwlist.h'))
{
print "Generating kwlist_d.h...\n";
system('perl -I src/tools src/tools/gen_keywordlist.pl --extern -o src/common src/include/parser/kwlist.h');
system(
'perl -I src/tools src/tools/gen_keywordlist.pl --extern -o src/common src/include/parser/kwlist.h'
);
}
if (IsNewer(
@ -424,10 +424,15 @@ sub GenerateFiles
'src/pl/plpgsql/src/pl_unreserved_kwlist_d.h',
'src/pl/plpgsql/src/pl_unreserved_kwlist.h'))
{
print "Generating pl_reserved_kwlist_d.h and pl_unreserved_kwlist_d.h...\n";
print
"Generating pl_reserved_kwlist_d.h and pl_unreserved_kwlist_d.h...\n";
chdir('src/pl/plpgsql/src');
system('perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ReservedPLKeywords pl_reserved_kwlist.h');
system('perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname UnreservedPLKeywords pl_unreserved_kwlist.h');
system(
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ReservedPLKeywords pl_reserved_kwlist.h'
);
system(
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname UnreservedPLKeywords pl_unreserved_kwlist.h'
);
chdir('../../../..');
}
@@ -440,8 +445,12 @@ sub GenerateFiles
{
print "Generating c_kwlist_d.h and ecpg_kwlist_d.h...\n";
chdir('src/interfaces/ecpg/preproc');
system('perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanCKeywords --no-case-fold c_kwlist.h');
system('perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanECPGKeywords ecpg_kwlist.h');
system(
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanCKeywords --no-case-fold c_kwlist.h'
);
system(
'perl -I ../../../tools ../../../tools/gen_keywordlist.pl --varname ScanECPGKeywords ecpg_kwlist.h'
);
chdir('../../../..');
}
@@ -527,7 +536,9 @@ EOF
{
chdir('src/backend/catalog');
my $bki_srcs = join(' ../../../src/include/catalog/', @bki_srcs);
system("perl genbki.pl --include-path ../../../src/include/ --set-version=$self->{majorver} $bki_srcs");
system(
"perl genbki.pl --include-path ../../../src/include/ --set-version=$self->{majorver} $bki_srcs"
);
open(my $f, '>', 'bki-stamp')
|| confess "Could not touch bki-stamp";
close($f);


@@ -6,7 +6,7 @@ use strict;
use File::Basename;
use File::Spec;
BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); }
use Cwd;


@@ -8,7 +8,7 @@ use warnings;
use File::Basename;
use File::Spec;
BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); }
use Install qw(Install);


@@ -9,7 +9,7 @@ use warnings;
use File::Basename;
use File::Spec;
BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); }
use Mkvcbuild;


@@ -12,7 +12,7 @@ use File::Copy;
use File::Find ();
use File::Path qw(rmtree);
use File::Spec;
BEGIN { use lib File::Spec->rel2abs(dirname(__FILE__)); }
use Install qw(Install);
@@ -203,8 +203,8 @@ sub tap_check
# adjust the environment for just this test
local %ENV = %ENV;
$ENV{PERL5LIB} = "$topdir/src/test/perl;$ENV{PERL5LIB}";
$ENV{PG_REGRESS} = "$topdir/$Config/pg_regress/pg_regress";
$ENV{REGRESS_SHLIB} = "$topdir/src/test/regress/regress.dll";
$ENV{TESTDIR} = "$dir";
@@ -404,7 +404,7 @@ sub subdircheck
return;
}
my @opts = fetchRegressOpts();
# Special processing for python transform modules, see their respective
# Makefiles for more details regarding Python-version specific