tap tests: replace 'master' with 'primary'.

We've largely replaced master with primary in docs etc., but the TAP tests
still widely use master.

Author: Andres Freund
Reviewed-By: David Steele
Discussion: https://postgr.es/m/20200615182235.x7lch5n6kcjq4aue@alap3.anarazel.de
Andres Freund 2020-06-14 11:47:37 -07:00
parent 2661a793ff
commit 229f8c219f
37 changed files with 777 additions and 777 deletions
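
A rename of this scope is normally done with a small script rather than by hand. The sketch below is purely illustrative (it is not the script used for this commit, and the starting directory is an assumption); it rewrites occurrences of 'master' to 'primary' in a tree of Perl test files, which also catches compound names such as $node_master and master_psql:

    use strict;
    use warnings;
    use File::Find;

    # Rewrite "master" to "primary" in .pl/.pm files under the given tree.
    # A blanket substitution like this needs a manual review of the result,
    # e.g. to make sure unrelated words such as "postmaster" stay untouched.
    find(
        sub {
            return unless -f $_ && /\.(?:pl|pm)$/;
            local $/;    # slurp the whole file
            open my $in, '<', $_ or die "$_: $!";
            my $text = <$in>;
            close $in;
            return unless $text =~ s/master/primary/g;
            open my $out, '>', $_ or die "$_: $!";
            print $out $text;
            close $out;
        },
        'src/test');     # assumed starting point; adjust as needed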


@ -5,10 +5,10 @@ use PostgresNode;
use TestLib;
use Test::More tests => 31;
my $node_master;
my $node_primary;
my $node_standby;
# Run few queries on both master and standby and check their results match.
# Run few queries on both primary and standby and check their results match.
sub test_index_replay
{
my ($test_name) = @_;
@ -17,7 +17,7 @@ sub test_index_replay
my $applname = $node_standby->name;
my $caughtup_query =
"SELECT pg_current_wal_lsn() <= write_lsn FROM pg_stat_replication WHERE application_name = '$applname';";
$node_master->poll_query_until('postgres', $caughtup_query)
$node_primary->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for standby 1 to catch up";
my $queries = qq(SET enable_seqscan=off;
@ -32,35 +32,35 @@ SELECT * FROM tst WHERE i = 7 AND t = 'e';
);
# Run test queries and compare their result
my $master_result = $node_master->safe_psql("postgres", $queries);
my $primary_result = $node_primary->safe_psql("postgres", $queries);
my $standby_result = $node_standby->safe_psql("postgres", $queries);
is($master_result, $standby_result, "$test_name: query result matches");
is($primary_result, $standby_result, "$test_name: query result matches");
return;
}
# Initialize master node
$node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->start;
# Initialize primary node
$node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_primary->start;
my $backup_name = 'my_backup';
# Take backup
$node_master->backup($backup_name);
$node_primary->backup($backup_name);
# Create streaming standby linking to master
# Create streaming standby linking to primary
$node_standby = get_new_node('standby');
$node_standby->init_from_backup($node_master, $backup_name,
$node_standby->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby->start;
# Create some bloom index on master
$node_master->safe_psql("postgres", "CREATE EXTENSION bloom;");
$node_master->safe_psql("postgres", "CREATE TABLE tst (i int4, t text);");
$node_master->safe_psql("postgres",
# Create some bloom index on primary
$node_primary->safe_psql("postgres", "CREATE EXTENSION bloom;");
$node_primary->safe_psql("postgres", "CREATE TABLE tst (i int4, t text);");
$node_primary->safe_psql("postgres",
"INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series(1,100000) i;"
);
$node_master->safe_psql("postgres",
$node_primary->safe_psql("postgres",
"CREATE INDEX bloomidx ON tst USING bloom (i, t) WITH (col1 = 3);");
# Test that queries give same result
@ -69,12 +69,12 @@ test_index_replay('initial');
# Run 10 cycles of table modification. Run test queries after each modification.
for my $i (1 .. 10)
{
$node_master->safe_psql("postgres", "DELETE FROM tst WHERE i = $i;");
$node_primary->safe_psql("postgres", "DELETE FROM tst WHERE i = $i;");
test_index_replay("delete $i");
$node_master->safe_psql("postgres", "VACUUM tst;");
$node_primary->safe_psql("postgres", "VACUUM tst;");
test_index_replay("vacuum $i");
my ($start, $end) = (100001 + ($i - 1) * 10000, 100000 + $i * 10000);
$node_master->safe_psql("postgres",
$node_primary->safe_psql("postgres",
"INSERT INTO tst SELECT i%10, substr(md5(i::text), 1, 1) FROM generate_series($start,$end) i;"
);
test_index_replay("insert $i");


@ -13,58 +13,58 @@ sub run_test
my $test_mode = shift;
RewindTest::setup_cluster($test_mode);
RewindTest::start_master();
RewindTest::start_primary();
# Create a test table and insert a row in master.
master_psql("CREATE TABLE tbl1 (d text)");
master_psql("INSERT INTO tbl1 VALUES ('in master')");
# Create a test table and insert a row in primary.
primary_psql("CREATE TABLE tbl1 (d text)");
primary_psql("INSERT INTO tbl1 VALUES ('in primary')");
# This test table will be used to test truncation, i.e. the table
# is extended in the old master after promotion
master_psql("CREATE TABLE trunc_tbl (d text)");
master_psql("INSERT INTO trunc_tbl VALUES ('in master')");
# is extended in the old primary after promotion
primary_psql("CREATE TABLE trunc_tbl (d text)");
primary_psql("INSERT INTO trunc_tbl VALUES ('in primary')");
# This test table will be used to test the "copy-tail" case, i.e. the
# table is truncated in the old master after promotion
master_psql("CREATE TABLE tail_tbl (id integer, d text)");
master_psql("INSERT INTO tail_tbl VALUES (0, 'in master')");
# table is truncated in the old primary after promotion
primary_psql("CREATE TABLE tail_tbl (id integer, d text)");
primary_psql("INSERT INTO tail_tbl VALUES (0, 'in primary')");
master_psql("CHECKPOINT");
primary_psql("CHECKPOINT");
RewindTest::create_standby($test_mode);
# Insert additional data on master that will be replicated to standby
master_psql("INSERT INTO tbl1 values ('in master, before promotion')");
master_psql(
"INSERT INTO trunc_tbl values ('in master, before promotion')");
master_psql(
"INSERT INTO tail_tbl SELECT g, 'in master, before promotion: ' || g FROM generate_series(1, 10000) g"
# Insert additional data on primary that will be replicated to standby
primary_psql("INSERT INTO tbl1 values ('in primary, before promotion')");
primary_psql(
"INSERT INTO trunc_tbl values ('in primary, before promotion')");
primary_psql(
"INSERT INTO tail_tbl SELECT g, 'in primary, before promotion: ' || g FROM generate_series(1, 10000) g"
);
master_psql('CHECKPOINT');
primary_psql('CHECKPOINT');
RewindTest::promote_standby();
# Insert a row in the old master. This causes the master and standby
# Insert a row in the old primary. This causes the primary and standby
# to have "diverged", it's no longer possible to just apply the
# standy's logs over master directory - you need to rewind.
master_psql("INSERT INTO tbl1 VALUES ('in master, after promotion')");
# standy's logs over primary directory - you need to rewind.
primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");
# Also insert a new row in the standby, which won't be present in the
# old master.
# old primary.
standby_psql("INSERT INTO tbl1 VALUES ('in standby, after promotion')");
# Insert enough rows to trunc_tbl to extend the file. pg_rewind should
# truncate it back to the old size.
master_psql(
"INSERT INTO trunc_tbl SELECT 'in master, after promotion: ' || g FROM generate_series(1, 10000) g"
primary_psql(
"INSERT INTO trunc_tbl SELECT 'in primary, after promotion: ' || g FROM generate_series(1, 10000) g"
);
# Truncate tail_tbl. pg_rewind should copy back the truncated part
# (We cannot use an actual TRUNCATE command here, as that creates a
# whole new relfilenode)
master_psql("DELETE FROM tail_tbl WHERE id > 10");
master_psql("VACUUM tail_tbl");
primary_psql("DELETE FROM tail_tbl WHERE id > 10");
primary_psql("VACUUM tail_tbl");
# Before running pg_rewind, do a couple of extra tests with several
# option combinations. As the code paths taken by those tests
@ -72,7 +72,7 @@ sub run_test
# in "local" mode for simplicity's sake.
if ($test_mode eq 'local')
{
my $master_pgdata = $node_master->data_dir;
my $primary_pgdata = $node_primary->data_dir;
my $standby_pgdata = $node_standby->data_dir;
# First check that pg_rewind fails if the target cluster is
@ -82,7 +82,7 @@ sub run_test
[
'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $master_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync'
],
'pg_rewind with running target');
@ -94,7 +94,7 @@ sub run_test
[
'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $master_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync', '--no-ensure-shutdown'
],
'pg_rewind --no-ensure-shutdown with running target');
@ -102,12 +102,12 @@ sub run_test
# Stop the target, and attempt to run with a local source
# still running. This fails as pg_rewind requires to have
# a source cleanly stopped.
$node_master->stop;
$node_primary->stop;
command_fails(
[
'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $master_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync', '--no-ensure-shutdown'
],
'pg_rewind with unexpected running source');
@ -121,30 +121,30 @@ sub run_test
[
'pg_rewind', '--debug',
'--source-pgdata', $standby_pgdata,
'--target-pgdata', $master_pgdata,
'--target-pgdata', $primary_pgdata,
'--no-sync', '--dry-run'
],
'pg_rewind --dry-run');
# Both clusters need to be alive moving forward.
$node_standby->start;
$node_master->start;
$node_primary->start;
}
RewindTest::run_pg_rewind($test_mode);
check_query(
'SELECT * FROM tbl1',
qq(in master
in master, before promotion
qq(in primary
in primary, before promotion
in standby, after promotion
),
'table content');
check_query(
'SELECT * FROM trunc_tbl',
qq(in master
in master, before promotion
qq(in primary
in primary, before promotion
),
'truncation');
@ -160,7 +160,7 @@ in master, before promotion
skip "unix-style permissions not supported on Windows", 1
if ($windows_os);
ok(check_mode_recursive($node_master->data_dir(), 0700, 0600),
ok(check_mode_recursive($node_primary->data_dir(), 0700, 0600),
'check PGDATA permissions');
}


@ -13,26 +13,26 @@ sub run_test
my $test_mode = shift;
RewindTest::setup_cluster($test_mode, ['-g']);
RewindTest::start_master();
RewindTest::start_primary();
# Create a database in master with a table.
master_psql('CREATE DATABASE inmaster');
master_psql('CREATE TABLE inmaster_tab (a int)', 'inmaster');
# Create a database in primary with a table.
primary_psql('CREATE DATABASE inprimary');
primary_psql('CREATE TABLE inprimary_tab (a int)', 'inprimary');
RewindTest::create_standby($test_mode);
# Create another database with another table, the creation is
# replicated to the standby.
master_psql('CREATE DATABASE beforepromotion');
master_psql('CREATE TABLE beforepromotion_tab (a int)',
primary_psql('CREATE DATABASE beforepromotion');
primary_psql('CREATE TABLE beforepromotion_tab (a int)',
'beforepromotion');
RewindTest::promote_standby();
# Create databases in the old master and the new promoted standby.
master_psql('CREATE DATABASE master_afterpromotion');
master_psql('CREATE TABLE master_promotion_tab (a int)',
'master_afterpromotion');
# Create databases in the old primary and the new promoted standby.
primary_psql('CREATE DATABASE primary_afterpromotion');
primary_psql('CREATE TABLE primary_promotion_tab (a int)',
'primary_afterpromotion');
standby_psql('CREATE DATABASE standby_afterpromotion');
standby_psql('CREATE TABLE standby_promotion_tab (a int)',
'standby_afterpromotion');
@ -45,7 +45,7 @@ sub run_test
check_query(
'SELECT datname FROM pg_database ORDER BY 1',
qq(beforepromotion
inmaster
inprimary
postgres
standby_afterpromotion
template0
@ -59,7 +59,7 @@ template1
skip "unix-style permissions not supported on Windows", 1
if ($windows_os);
ok(check_mode_recursive($node_master->data_dir(), 0750, 0640),
ok(check_mode_recursive($node_primary->data_dir(), 0750, 0640),
'check PGDATA permissions');
}


@ -18,21 +18,21 @@ sub run_test
my $test_mode = shift;
RewindTest::setup_cluster($test_mode);
RewindTest::start_master();
RewindTest::start_primary();
my $test_master_datadir = $node_master->data_dir;
my $test_primary_datadir = $node_primary->data_dir;
# Create a subdir and files that will be present in both
mkdir "$test_master_datadir/tst_both_dir";
append_to_file "$test_master_datadir/tst_both_dir/both_file1", "in both1";
append_to_file "$test_master_datadir/tst_both_dir/both_file2", "in both2";
mkdir "$test_master_datadir/tst_both_dir/both_subdir/";
append_to_file "$test_master_datadir/tst_both_dir/both_subdir/both_file3",
mkdir "$test_primary_datadir/tst_both_dir";
append_to_file "$test_primary_datadir/tst_both_dir/both_file1", "in both1";
append_to_file "$test_primary_datadir/tst_both_dir/both_file2", "in both2";
mkdir "$test_primary_datadir/tst_both_dir/both_subdir/";
append_to_file "$test_primary_datadir/tst_both_dir/both_subdir/both_file3",
"in both3";
RewindTest::create_standby($test_mode);
# Create different subdirs and files in master and standby
# Create different subdirs and files in primary and standby
my $test_standby_datadir = $node_standby->data_dir;
mkdir "$test_standby_datadir/tst_standby_dir";
@ -45,15 +45,15 @@ sub run_test
"$test_standby_datadir/tst_standby_dir/standby_subdir/standby_file3",
"in standby3";
mkdir "$test_master_datadir/tst_master_dir";
append_to_file "$test_master_datadir/tst_master_dir/master_file1",
"in master1";
append_to_file "$test_master_datadir/tst_master_dir/master_file2",
"in master2";
mkdir "$test_master_datadir/tst_master_dir/master_subdir/";
mkdir "$test_primary_datadir/tst_primary_dir";
append_to_file "$test_primary_datadir/tst_primary_dir/primary_file1",
"in primary1";
append_to_file "$test_primary_datadir/tst_primary_dir/primary_file2",
"in primary2";
mkdir "$test_primary_datadir/tst_primary_dir/primary_subdir/";
append_to_file
"$test_master_datadir/tst_master_dir/master_subdir/master_file3",
"in master3";
"$test_primary_datadir/tst_primary_dir/primary_subdir/primary_file3",
"in primary3";
RewindTest::promote_standby();
RewindTest::run_pg_rewind($test_mode);
@ -65,21 +65,21 @@ sub run_test
push @paths, $File::Find::name
if $File::Find::name =~ m/.*tst_.*/;
},
$test_master_datadir);
$test_primary_datadir);
@paths = sort @paths;
is_deeply(
\@paths,
[
"$test_master_datadir/tst_both_dir",
"$test_master_datadir/tst_both_dir/both_file1",
"$test_master_datadir/tst_both_dir/both_file2",
"$test_master_datadir/tst_both_dir/both_subdir",
"$test_master_datadir/tst_both_dir/both_subdir/both_file3",
"$test_master_datadir/tst_standby_dir",
"$test_master_datadir/tst_standby_dir/standby_file1",
"$test_master_datadir/tst_standby_dir/standby_file2",
"$test_master_datadir/tst_standby_dir/standby_subdir",
"$test_master_datadir/tst_standby_dir/standby_subdir/standby_file3"
"$test_primary_datadir/tst_both_dir",
"$test_primary_datadir/tst_both_dir/both_file1",
"$test_primary_datadir/tst_both_dir/both_file2",
"$test_primary_datadir/tst_both_dir/both_subdir",
"$test_primary_datadir/tst_both_dir/both_subdir/both_file3",
"$test_primary_datadir/tst_standby_dir",
"$test_primary_datadir/tst_standby_dir/standby_file1",
"$test_primary_datadir/tst_standby_dir/standby_file2",
"$test_primary_datadir/tst_standby_dir/standby_subdir",
"$test_primary_datadir/tst_standby_dir/standby_subdir/standby_file3"
],
"file lists match");


@ -26,50 +26,50 @@ sub run_test
{
my $test_mode = shift;
my $master_xlogdir = "${TestLib::tmp_check}/xlog_master";
my $primary_xlogdir = "${TestLib::tmp_check}/xlog_primary";
rmtree($master_xlogdir);
rmtree($primary_xlogdir);
RewindTest::setup_cluster($test_mode);
my $test_master_datadir = $node_master->data_dir;
my $test_primary_datadir = $node_primary->data_dir;
# turn pg_wal into a symlink
print("moving $test_master_datadir/pg_wal to $master_xlogdir\n");
move("$test_master_datadir/pg_wal", $master_xlogdir) or die;
symlink($master_xlogdir, "$test_master_datadir/pg_wal") or die;
print("moving $test_primary_datadir/pg_wal to $primary_xlogdir\n");
move("$test_primary_datadir/pg_wal", $primary_xlogdir) or die;
symlink($primary_xlogdir, "$test_primary_datadir/pg_wal") or die;
RewindTest::start_master();
RewindTest::start_primary();
# Create a test table and insert a row in master.
master_psql("CREATE TABLE tbl1 (d text)");
master_psql("INSERT INTO tbl1 VALUES ('in master')");
# Create a test table and insert a row in primary.
primary_psql("CREATE TABLE tbl1 (d text)");
primary_psql("INSERT INTO tbl1 VALUES ('in primary')");
master_psql("CHECKPOINT");
primary_psql("CHECKPOINT");
RewindTest::create_standby($test_mode);
# Insert additional data on master that will be replicated to standby
master_psql("INSERT INTO tbl1 values ('in master, before promotion')");
# Insert additional data on primary that will be replicated to standby
primary_psql("INSERT INTO tbl1 values ('in primary, before promotion')");
master_psql('CHECKPOINT');
primary_psql('CHECKPOINT');
RewindTest::promote_standby();
# Insert a row in the old master. This causes the master and standby
# Insert a row in the old primary. This causes the primary and standby
# to have "diverged", it's no longer possible to just apply the
# standy's logs over master directory - you need to rewind.
master_psql("INSERT INTO tbl1 VALUES ('in master, after promotion')");
# standy's logs over primary directory - you need to rewind.
primary_psql("INSERT INTO tbl1 VALUES ('in primary, after promotion')");
# Also insert a new row in the standby, which won't be present in the
# old master.
# old primary.
standby_psql("INSERT INTO tbl1 VALUES ('in standby, after promotion')");
RewindTest::run_pg_rewind($test_mode);
check_query(
'SELECT * FROM tbl1',
qq(in master
in master, before promotion
qq(in primary
in primary, before promotion
in standby, after promotion
),
'table content');


@ -13,7 +13,7 @@ use lib $FindBin::RealBin;
use RewindTest;
RewindTest::setup_cluster();
RewindTest::start_master();
RewindTest::start_primary();
RewindTest::create_standby();
RewindTest::run_pg_rewind('local');
RewindTest::clean_rewind_test();


@ -2,31 +2,31 @@ package RewindTest;
# Test driver for pg_rewind. Each test consists of a cycle where a new cluster
# is first created with initdb, and a streaming replication standby is set up
# to follow the master. Then the master is shut down and the standby is
# promoted, and finally pg_rewind is used to rewind the old master, using the
# to follow the primary. Then the primary is shut down and the standby is
# promoted, and finally pg_rewind is used to rewind the old primary, using the
# standby as the source.
#
# To run a test, the test script (in t/ subdirectory) calls the functions
# in this module. These functions should be called in this sequence:
#
# 1. setup_cluster - creates a PostgreSQL cluster that runs as the master
# 1. setup_cluster - creates a PostgreSQL cluster that runs as the primary
#
# 2. start_master - starts the master server
# 2. start_primary - starts the primary server
#
# 3. create_standby - runs pg_basebackup to initialize a standby server, and
# sets it up to follow the master.
# sets it up to follow the primary.
#
# 4. promote_standby - runs "pg_ctl promote" to promote the standby server.
# The old master keeps running.
# The old primary keeps running.
#
# 5. run_pg_rewind - stops the old master (if it's still running) and runs
# 5. run_pg_rewind - stops the old primary (if it's still running) and runs
# pg_rewind to synchronize it with the now-promoted standby server.
#
# 6. clean_rewind_test - stops both servers used in the test, if they're
# still running.
#
# The test script can use the helper functions master_psql and standby_psql
# to run psql against the master and standby servers, respectively.
# The test script can use the helper functions primary_psql and standby_psql
# to run psql against the primary and standby servers, respectively.
use strict;
use warnings;
@ -43,15 +43,15 @@ use TestLib;
use Test::More;
our @EXPORT = qw(
$node_master
$node_primary
$node_standby
master_psql
primary_psql
standby_psql
check_query
setup_cluster
start_master
start_primary
create_standby
promote_standby
run_pg_rewind
@ -59,16 +59,16 @@ our @EXPORT = qw(
);
# Our nodes.
our $node_master;
our $node_primary;
our $node_standby;
sub master_psql
sub primary_psql
{
my $cmd = shift;
my $dbname = shift || 'postgres';
system_or_bail 'psql', '-q', '--no-psqlrc', '-d',
$node_master->connstr($dbname), '-c', "$cmd";
$node_primary->connstr($dbname), '-c', "$cmd";
return;
}
@ -82,7 +82,7 @@ sub standby_psql
return;
}
# Run a query against the master, and check that the output matches what's
# Run a query against the primary, and check that the output matches what's
# expected
sub check_query
{
@ -94,7 +94,7 @@ sub check_query
# we want just the output, no formatting
my $result = run [
'psql', '-q', '-A', '-t', '--no-psqlrc', '-d',
$node_master->connstr('postgres'),
$node_primary->connstr('postgres'),
'-c', $query
],
'>', \$stdout, '2>', \$stderr;
@ -123,34 +123,34 @@ sub setup_cluster
my $extra_name = shift; # Used to differentiate clusters
my $extra = shift; # Extra params for initdb
# Initialize master, data checksums are mandatory
$node_master =
get_new_node('master' . ($extra_name ? "_${extra_name}" : ''));
# Initialize primary, data checksums are mandatory
$node_primary =
get_new_node('primary' . ($extra_name ? "_${extra_name}" : ''));
# Set up pg_hba.conf and pg_ident.conf for the role running
# pg_rewind. This role is used for all the tests, and has
# minimal permissions enough to rewind from an online source.
$node_master->init(
$node_primary->init(
allows_streaming => 1,
extra => $extra,
auth_extra => [ '--create-role', 'rewind_user' ]);
# Set wal_keep_segments to prevent WAL segment recycling after enforced
# checkpoints in the tests.
$node_master->append_conf(
$node_primary->append_conf(
'postgresql.conf', qq(
wal_keep_segments = 20
));
return;
}
sub start_master
sub start_primary
{
$node_master->start;
$node_primary->start;
# Create custom role which is used to run pg_rewind, and adjust its
# permissions to the minimum necessary.
$node_master->safe_psql(
$node_primary->safe_psql(
'postgres', "
CREATE ROLE rewind_user LOGIN;
GRANT EXECUTE ON function pg_catalog.pg_ls_dir(text, boolean, boolean)
@ -162,7 +162,7 @@ sub start_master
GRANT EXECUTE ON function pg_catalog.pg_read_binary_file(text, bigint, bigint, boolean)
TO rewind_user;");
#### Now run the test-specific parts to initialize the master before setting
#### Now run the test-specific parts to initialize the primary before setting
# up standby
return;
@ -174,13 +174,13 @@ sub create_standby
$node_standby =
get_new_node('standby' . ($extra_name ? "_${extra_name}" : ''));
$node_master->backup('my_backup');
$node_standby->init_from_backup($node_master, 'my_backup');
my $connstr_master = $node_master->connstr();
$node_primary->backup('my_backup');
$node_standby->init_from_backup($node_primary, 'my_backup');
my $connstr_primary = $node_primary->connstr();
$node_standby->append_conf(
"postgresql.conf", qq(
primary_conninfo='$connstr_master'
primary_conninfo='$connstr_primary'
));
$node_standby->set_standby_mode();
@ -200,10 +200,10 @@ sub promote_standby
# up standby
# Wait for the standby to receive and write all WAL.
$node_master->wait_for_catchup($node_standby, 'write');
$node_primary->wait_for_catchup($node_standby, 'write');
# Now promote standby and insert some new data on master, this will put
# the master out-of-sync with the standby.
# Now promote standby and insert some new data on primary, this will put
# the primary out-of-sync with the standby.
$node_standby->promote;
# Force a checkpoint after the promotion. pg_rewind looks at the control
@ -220,7 +220,7 @@ sub promote_standby
sub run_pg_rewind
{
my $test_mode = shift;
my $master_pgdata = $node_master->data_dir;
my $primary_pgdata = $node_primary->data_dir;
my $standby_pgdata = $node_standby->data_dir;
my $standby_connstr = $node_standby->connstr('postgres');
my $tmp_folder = TestLib::tempdir;
@ -239,14 +239,14 @@ sub run_pg_rewind
# segments but that would just make the test more costly,
# without improving the coverage. Hence, instead, stop
# gracefully the primary here.
$node_master->stop;
$node_primary->stop;
}
else
{
# Stop the master and be ready to perform the rewind. The cluster
# Stop the primary and be ready to perform the rewind. The cluster
# needs recovery to finish once, and pg_rewind makes sure that it
# happens automatically.
$node_master->stop('immediate');
$node_primary->stop('immediate');
}
# At this point, the rewind processing is ready to run.
@ -254,25 +254,25 @@ sub run_pg_rewind
# The real testing begins really now with a bifurcation of the possible
# scenarios that pg_rewind supports.
# Keep a temporary postgresql.conf for master node or it would be
# Keep a temporary postgresql.conf for primary node or it would be
# overwritten during the rewind.
copy(
"$master_pgdata/postgresql.conf",
"$tmp_folder/master-postgresql.conf.tmp");
"$primary_pgdata/postgresql.conf",
"$tmp_folder/primary-postgresql.conf.tmp");
# Now run pg_rewind
if ($test_mode eq "local")
{
# Do rewind using a local pgdata as source
# Stop the master and be ready to perform the rewind
# Stop the primary and be ready to perform the rewind
$node_standby->stop;
command_ok(
[
'pg_rewind',
"--debug",
"--source-pgdata=$standby_pgdata",
"--target-pgdata=$master_pgdata",
"--target-pgdata=$primary_pgdata",
"--no-sync"
],
'pg_rewind local');
@ -285,19 +285,19 @@ sub run_pg_rewind
[
'pg_rewind', "--debug",
"--source-server", $standby_connstr,
"--target-pgdata=$master_pgdata", "--no-sync",
"--target-pgdata=$primary_pgdata", "--no-sync",
"--write-recovery-conf"
],
'pg_rewind remote');
# Check that standby.signal is here as recovery configuration
# was requested.
ok( -e "$master_pgdata/standby.signal",
ok( -e "$primary_pgdata/standby.signal",
'standby.signal created after pg_rewind');
# Now, when pg_rewind apparently succeeded with minimal permissions,
# add REPLICATION privilege. So we could test that new standby
# is able to connect to the new master with generated config.
# is able to connect to the new primary with generated config.
$node_standby->safe_psql('postgres',
"ALTER ROLE rewind_user WITH REPLICATION;");
}
@ -305,30 +305,30 @@ sub run_pg_rewind
{
# Do rewind using a local pgdata as source and specified
# directory with target WAL archive. The old master has
# directory with target WAL archive. The old primary has
# to be stopped at this point.
# Remove the existing archive directory and move all WAL
# segments from the old master to the archives. These
# segments from the old primary to the archives. These
# will be used by pg_rewind.
rmtree($node_master->archive_dir);
RecursiveCopy::copypath($node_master->data_dir . "/pg_wal",
$node_master->archive_dir);
rmtree($node_primary->archive_dir);
RecursiveCopy::copypath($node_primary->data_dir . "/pg_wal",
$node_primary->archive_dir);
# Fast way to remove entire directory content
rmtree($node_master->data_dir . "/pg_wal");
mkdir($node_master->data_dir . "/pg_wal");
rmtree($node_primary->data_dir . "/pg_wal");
mkdir($node_primary->data_dir . "/pg_wal");
# Make sure that directories have the right umask as this is
# required by a follow-up check on permissions, and better
# safe than sorry.
chmod(0700, $node_master->archive_dir);
chmod(0700, $node_master->data_dir . "/pg_wal");
chmod(0700, $node_primary->archive_dir);
chmod(0700, $node_primary->data_dir . "/pg_wal");
# Add appropriate restore_command to the target cluster
$node_master->enable_restoring($node_master, 0);
$node_primary->enable_restoring($node_primary, 0);
# Stop the new master and be ready to perform the rewind.
# Stop the new primary and be ready to perform the rewind.
$node_standby->stop;
# Note the use of --no-ensure-shutdown here. WAL files are
@ -339,7 +339,7 @@ sub run_pg_rewind
'pg_rewind',
"--debug",
"--source-pgdata=$standby_pgdata",
"--target-pgdata=$master_pgdata",
"--target-pgdata=$primary_pgdata",
"--no-sync",
"--no-ensure-shutdown",
"--restore-target-wal"
@ -355,28 +355,28 @@ sub run_pg_rewind
# Now move back postgresql.conf with old settings
move(
"$tmp_folder/master-postgresql.conf.tmp",
"$master_pgdata/postgresql.conf");
"$tmp_folder/primary-postgresql.conf.tmp",
"$primary_pgdata/postgresql.conf");
chmod(
$node_master->group_access() ? 0640 : 0600,
"$master_pgdata/postgresql.conf")
$node_primary->group_access() ? 0640 : 0600,
"$primary_pgdata/postgresql.conf")
or BAIL_OUT(
"unable to set permissions for $master_pgdata/postgresql.conf");
"unable to set permissions for $primary_pgdata/postgresql.conf");
# Plug-in rewound node to the now-promoted standby node
if ($test_mode ne "remote")
{
my $port_standby = $node_standby->port;
$node_master->append_conf(
$node_primary->append_conf(
'postgresql.conf', qq(
primary_conninfo='port=$port_standby'));
$node_master->set_standby_mode();
$node_primary->set_standby_mode();
}
# Restart the master to check that rewind went correctly
$node_master->start;
# Restart the primary to check that rewind went correctly
$node_primary->start;
#### Now run the test-specific parts to check the result
@ -386,7 +386,7 @@ primary_conninfo='port=$port_standby'));
# Clean up after the test. Stop both servers, if they're still running.
sub clean_rewind_test
{
$node_master->teardown_node if defined $node_master;
$node_primary->teardown_node if defined $node_primary;
$node_standby->teardown_node if defined $node_standby;
return;
}


@ -9,13 +9,13 @@ use PostgresNode;
use TestLib;
use Test::More tests => 19;
my $master = get_new_node('master');
$master->init(allows_streaming => 1);
$master->start;
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->start;
for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
{
my $backup_path = $master->backup_dir . '/' . $algorithm;
my $backup_path = $primary->backup_dir . '/' . $algorithm;
my @backup = (
'pg_basebackup', '-D', $backup_path,
'--manifest-checksums', $algorithm, '--no-sync');
@ -24,13 +24,13 @@ for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
# A backup with a bogus algorithm should fail.
if ($algorithm eq 'bogus')
{
$master->command_fails(\@backup,
$primary->command_fails(\@backup,
"backup fails with algorithm \"$algorithm\"");
next;
}
# A backup with a valid algorithm should work.
$master->command_ok(\@backup, "backup ok with algorithm \"$algorithm\"");
$primary->command_ok(\@backup, "backup ok with algorithm \"$algorithm\"");
# We expect each real checksum algorithm to be mentioned on every line of
# the backup manifest file except the first and last; for simplicity, we
@ -50,7 +50,7 @@ for my $algorithm (qw(bogus none crc32c sha224 sha256 sha384 sha512))
}
# Make sure that it verifies OK.
$master->command_ok(\@verify,
$primary->command_ok(\@verify,
"verify backup with algorithm \"$algorithm\"");
# Remove backup immediately to save disk space.


@ -9,9 +9,9 @@ use PostgresNode;
use TestLib;
use Test::More tests => 44;
my $master = get_new_node('master');
$master->init(allows_streaming => 1);
$master->start;
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->start;
# Include a user-defined tablespace in the hopes of detecting problems in that
# area.
@ -19,7 +19,7 @@ my $source_ts_path = TestLib::perl2host(TestLib::tempdir_short());
my $source_ts_prefix = $source_ts_path;
$source_ts_prefix =~ s!(^[A-Z]:/[^/]*)/.*!$1!;
$master->safe_psql('postgres', <<EOM);
$primary->safe_psql('postgres', <<EOM);
CREATE TABLE x1 (a int);
INSERT INTO x1 VALUES (111);
CREATE TABLESPACE ts1 LOCATION '$source_ts_path';
@ -103,13 +103,13 @@ for my $scenario (@scenario)
if $scenario->{'skip_on_windows'} && $windows_os;
# Take a backup and check that it verifies OK.
my $backup_path = $master->backup_dir . '/' . $name;
my $backup_path = $primary->backup_dir . '/' . $name;
my $backup_ts_path = TestLib::perl2host(TestLib::tempdir_short());
# The tablespace map parameter confuses Msys2, which tries to mangle
# it. Tell it not to.
# See https://www.msys2.org/wiki/Porting/#filesystem-namespaces
local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix;
$master->command_ok(
$primary->command_ok(
[
'pg_basebackup', '-D', $backup_path, '--no-sync',
'-T', "${source_ts_path}=${backup_ts_path}"


@ -10,11 +10,11 @@ use TestLib;
use Test::More tests => 25;
# Start up the server and take a backup.
my $master = get_new_node('master');
$master->init(allows_streaming => 1);
$master->start;
my $backup_path = $master->backup_dir . '/test_options';
$master->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ],
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_options';
$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ],
"base backup ok");
# Verify that pg_verifybackup -q succeeds and produces no output.


@ -8,11 +8,11 @@ use PostgresNode;
use TestLib;
use Test::More tests => 5;
my $master = get_new_node('master');
$master->init(allows_streaming => 1);
$master->start;
my $backup_path = $master->backup_dir . '/test_encoding';
$master->command_ok(
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_encoding';
$primary->command_ok(
[
'pg_basebackup', '-D',
$backup_path, '--no-sync',


@ -10,16 +10,16 @@ use TestLib;
use Test::More tests => 7;
# Start up the server and take a backup.
my $master = get_new_node('master');
$master->init(allows_streaming => 1);
$master->start;
my $backup_path = $master->backup_dir . '/test_wal';
$master->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ],
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->start;
my $backup_path = $primary->backup_dir . '/test_wal';
$primary->command_ok([ 'pg_basebackup', '-D', $backup_path, '--no-sync' ],
"base backup ok");
# Rename pg_wal.
my $original_pg_wal = $backup_path . '/pg_wal';
my $relocated_pg_wal = $master->backup_dir . '/relocated_pg_wal';
my $relocated_pg_wal = $primary->backup_dir . '/relocated_pg_wal';
rename($original_pg_wal, $relocated_pg_wal) || die "rename pg_wal: $!";
# WAL verification should fail.


@ -51,8 +51,8 @@ sub test_role
return;
}
# Initialize master node
my $node = get_new_node('master');
# Initialize primary node
my $node = get_new_node('primary');
$node->init;
$node->start;


@ -49,9 +49,9 @@ sub test_login
return;
}
# Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII
# Initialize primary node. Force UTF-8 encoding, so that we can use non-ASCII
# characters in the passwords below.
my $node = get_new_node('master');
my $node = get_new_node('primary');
$node->init(extra => [ '--locale=C', '--encoding=UTF8' ]);
$node->start;


@ -8,45 +8,45 @@ use Test::More tests => 4;
use PostgresNode;
my $bkplabel = 'backup';
my $master = get_new_node('master');
$master->init(allows_streaming => 1);
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$master->append_conf(
$primary->append_conf(
'postgresql.conf', qq{
track_commit_timestamp = on
max_wal_senders = 5
});
$master->start;
$master->backup($bkplabel);
$primary->start;
$primary->backup($bkplabel);
my $standby = get_new_node('standby');
$standby->init_from_backup($master, $bkplabel, has_streaming => 1);
$standby->init_from_backup($primary, $bkplabel, has_streaming => 1);
$standby->start;
for my $i (1 .. 10)
{
$master->safe_psql('postgres', "create table t$i()");
$primary->safe_psql('postgres', "create table t$i()");
}
my $master_ts = $master->safe_psql('postgres',
my $primary_ts = $primary->safe_psql('postgres',
qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'}
);
my $master_lsn =
$master->safe_psql('postgres', 'select pg_current_wal_lsn()');
my $primary_lsn =
$primary->safe_psql('postgres', 'select pg_current_wal_lsn()');
$standby->poll_query_until('postgres',
qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
qq{SELECT '$primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
or die "standby never caught up";
my $standby_ts = $standby->safe_psql('postgres',
qq{select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = 't10'}
);
is($master_ts, $standby_ts, "standby gives same value as master");
is($primary_ts, $standby_ts, "standby gives same value as primary");
$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$master->restart;
$master->safe_psql('postgres', 'checkpoint');
$master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$primary->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$primary->restart;
$primary->safe_psql('postgres', 'checkpoint');
$primary_lsn = $primary->safe_psql('postgres', 'select pg_current_wal_lsn()');
$standby->poll_query_until('postgres',
qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
qq{SELECT '$primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
or die "standby never caught up";
$standby->safe_psql('postgres', 'checkpoint');
@ -54,10 +54,10 @@ $standby->safe_psql('postgres', 'checkpoint');
my ($ret, $standby_ts_stdout, $standby_ts_stderr) = $standby->psql('postgres',
'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t10\''
);
is($ret, 3, 'standby errors when master turned feature off');
is($ret, 3, 'standby errors when primary turned feature off');
is($standby_ts_stdout, '',
"standby gives no value when master turned feature off");
"standby gives no value when primary turned feature off");
like(
$standby_ts_stderr,
qr/could not get commit timestamp data/,
'expected error when master turned feature off');
'expected error when primary turned feature off');


@ -1,4 +1,4 @@
# Test master/standby scenario where the track_commit_timestamp GUC is
# Test primary/standby scenario where the track_commit_timestamp GUC is
# repeatedly toggled on and off.
use strict;
use warnings;
@ -8,31 +8,31 @@ use Test::More tests => 4;
use PostgresNode;
my $bkplabel = 'backup';
my $master = get_new_node('master');
$master->init(allows_streaming => 1);
$master->append_conf(
my $primary = get_new_node('primary');
$primary->init(allows_streaming => 1);
$primary->append_conf(
'postgresql.conf', qq{
track_commit_timestamp = on
max_wal_senders = 5
});
$master->start;
$master->backup($bkplabel);
$primary->start;
$primary->backup($bkplabel);
my $standby = get_new_node('standby');
$standby->init_from_backup($master, $bkplabel, has_streaming => 1);
$standby->init_from_backup($primary, $bkplabel, has_streaming => 1);
$standby->start;
for my $i (1 .. 10)
{
$master->safe_psql('postgres', "create table t$i()");
$primary->safe_psql('postgres', "create table t$i()");
}
$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$master->restart;
$master->safe_psql('postgres', 'checkpoint');
my $master_lsn =
$master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$primary->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$primary->restart;
$primary->safe_psql('postgres', 'checkpoint');
my $primary_lsn =
$primary->safe_psql('postgres', 'select pg_current_wal_lsn()');
$standby->poll_query_until('postgres',
qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
qq{SELECT '$primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
or die "standby never caught up";
$standby->safe_psql('postgres', 'checkpoint');
@ -49,10 +49,10 @@ like(
qr/could not get commit timestamp data/,
'expected err msg after restart');
$master->append_conf('postgresql.conf', 'track_commit_timestamp = on');
$master->restart;
$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$master->restart;
$primary->append_conf('postgresql.conf', 'track_commit_timestamp = on');
$primary->restart;
$primary->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$primary->restart;
system_or_bail('pg_ctl', '-D', $standby->data_dir, 'promote');


@ -5,15 +5,15 @@ use PostgresNode;
use TestLib;
use Test::More tests => 16;
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = on');
$node_master->start;
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_primary->append_conf('postgresql.conf', 'track_commit_timestamp = on');
$node_primary->start;
my ($ret, $stdout, $stderr);
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('0');]);
$node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('0');]);
is($ret, 3, 'getting ts of InvalidTransactionId reports error');
like(
$stderr,
@ -21,27 +21,27 @@ like(
'expected error from InvalidTransactionId');
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]);
$node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]);
is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
is($stdout, '', 'timestamp of BootstrapTransactionId is null');
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]);
$node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]);
is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
is($stdout, '', 'timestamp of FrozenTransactionId is null');
# Since FirstNormalTransactionId will've occurred during initdb, long before we
# enabled commit timestamps, it'll be null since we have no cts data for it but
# cts are enabled.
is( $node_master->safe_psql(
is( $node_primary->safe_psql(
'postgres', qq[SELECT pg_xact_commit_timestamp('3');]),
'',
'committs for FirstNormalTransactionId is null');
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
qq[CREATE TABLE committs_test(x integer, y timestamp with time zone);]);
my $xid = $node_master->safe_psql(
my $xid = $node_primary->safe_psql(
'postgres', qq[
BEGIN;
INSERT INTO committs_test(x, y) VALUES (1, current_timestamp);
@ -49,43 +49,43 @@ my $xid = $node_master->safe_psql(
COMMIT;
]);
my $before_restart_ts = $node_master->safe_psql('postgres',
my $before_restart_ts = $node_primary->safe_psql('postgres',
qq[SELECT pg_xact_commit_timestamp('$xid');]);
ok($before_restart_ts ne '' && $before_restart_ts ne 'null',
'commit timestamp recorded');
$node_master->stop('immediate');
$node_master->start;
$node_primary->stop('immediate');
$node_primary->start;
my $after_crash_ts = $node_master->safe_psql('postgres',
my $after_crash_ts = $node_primary->safe_psql('postgres',
qq[SELECT pg_xact_commit_timestamp('$xid');]);
is($after_crash_ts, $before_restart_ts,
'timestamps before and after crash are equal');
$node_master->stop('fast');
$node_master->start;
$node_primary->stop('fast');
$node_primary->start;
my $after_restart_ts = $node_master->safe_psql('postgres',
my $after_restart_ts = $node_primary->safe_psql('postgres',
qq[SELECT pg_xact_commit_timestamp('$xid');]);
is($after_restart_ts, $before_restart_ts,
'timestamps before and after restart are equal');
# Now disable commit timestamps
$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$node_master->stop('fast');
$node_primary->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$node_primary->stop('fast');
# Start the server, which generates a XLOG_PARAMETER_CHANGE record where
# the parameter change is registered.
$node_master->start;
$node_primary->start;
# Now restart again the server so as no XLOG_PARAMETER_CHANGE record are
# replayed with the follow-up immediate shutdown.
$node_master->restart;
$node_primary->restart;
# Move commit timestamps across page boundaries. Things should still
# be able to work across restarts with those transactions committed while
# track_commit_timestamp is disabled.
$node_master->safe_psql(
$node_primary->safe_psql(
'postgres',
qq(CREATE PROCEDURE consume_xid(cnt int)
AS \$\$
@ -100,9 +100,9 @@ DECLARE
\$\$
LANGUAGE plpgsql;
));
$node_master->safe_psql('postgres', 'CALL consume_xid(2000)');
$node_primary->safe_psql('postgres', 'CALL consume_xid(2000)');
($ret, $stdout, $stderr) = $node_master->psql('postgres',
($ret, $stdout, $stderr) = $node_primary->psql('postgres',
qq[SELECT pg_xact_commit_timestamp('$xid');]);
is($ret, 3, 'no commit timestamp from enable tx when cts disabled');
like(
@ -111,7 +111,7 @@ like(
'expected error from enabled tx when committs disabled');
# Do a tx while cts disabled
my $xid_disabled = $node_master->safe_psql(
my $xid_disabled = $node_primary->safe_psql(
'postgres', qq[
BEGIN;
INSERT INTO committs_test(x, y) VALUES (2, current_timestamp);
@ -120,7 +120,7 @@ my $xid_disabled = $node_master->safe_psql(
]);
# Should be inaccessible
($ret, $stdout, $stderr) = $node_master->psql('postgres',
($ret, $stdout, $stderr) = $node_primary->psql('postgres',
qq[SELECT pg_xact_commit_timestamp('$xid_disabled');]);
is($ret, 3, 'no commit timestamp when disabled');
like(
@ -129,21 +129,21 @@ like(
'expected error from disabled tx when committs disabled');
# Re-enable, restart and ensure we can still get the old timestamps
$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = on');
$node_primary->append_conf('postgresql.conf', 'track_commit_timestamp = on');
# An immediate shutdown is used here. At next startup recovery will
# replay transactions which committed when track_commit_timestamp was
# disabled, and the facility should be able to work properly.
$node_master->stop('immediate');
$node_master->start;
$node_primary->stop('immediate');
$node_primary->start;
my $after_enable_ts = $node_master->safe_psql('postgres',
my $after_enable_ts = $node_primary->safe_psql('postgres',
qq[SELECT pg_xact_commit_timestamp('$xid');]);
is($after_enable_ts, '', 'timestamp of enabled tx null after re-enable');
my $after_enable_disabled_ts = $node_master->safe_psql('postgres',
my $after_enable_disabled_ts = $node_primary->safe_psql('postgres',
qq[SELECT pg_xact_commit_timestamp('$xid_disabled');]);
is($after_enable_disabled_ts, '',
'timestamp of disabled tx null after re-enable');
$node_master->stop;
$node_primary->stop;


@ -7,7 +7,7 @@ use TestLib;
use Test::More tests => 42;
# Initialize a test cluster
my $node = get_new_node('master');
my $node = get_new_node('primary');
$node->init();
# Turn message level up to DEBUG1 so that we get the messages we want to see
$node->append_conf('postgresql.conf', 'client_min_messages = DEBUG1');


@ -1822,11 +1822,11 @@ sub run_log
Look up WAL locations on the server:
* insert location (master only, error on replica)
* write location (master only, error on replica)
* flush location (master only, error on replica)
* receive location (always undef on master)
* replay location (always undef on master)
* insert location (primary only, error on replica)
* write location (primary only, error on replica)
* flush location (primary only, error on replica)
* receive location (always undef on primary)
* replay location (always undef on primary)
mode must be specified.
@ -1876,7 +1876,7 @@ poll_query_until timeout.
Requires that the 'postgres' db exists and is accessible.
target_lsn may be any arbitrary lsn, but is typically $master_node->lsn('insert').
target_lsn may be any arbitrary lsn, but is typically $primary_node->lsn('insert').
If omitted, pg_current_wal_lsn() is used.
This is not a test. It die()s on failure.
@ -1935,7 +1935,7 @@ This is not a test. It die()s on failure.
If the slot is not active, will time out after poll_query_until's timeout.
target_lsn may be any arbitrary lsn, but is typically $master_node->lsn('insert').
target_lsn may be any arbitrary lsn, but is typically $primary_node->lsn('insert').
Note that for logical slots, restart_lsn is held down by the oldest in-progress tx.


@ -48,7 +48,7 @@ Each test script should begin with:
then it will generally need to set up one or more nodes, run commands
against them and evaluate the results. For example:
my $node = PostgresNode->get_new_node('master');
my $node = PostgresNode->get_new_node('primary');
$node->init;
$node->start;


@ -5,22 +5,22 @@ use PostgresNode;
use TestLib;
use Test::More tests => 36;
# Initialize master node
my $node_master = get_new_node('master');
# Initialize primary node
my $node_primary = get_new_node('primary');
# A specific role is created to perform some tests related to replication,
# and it needs proper authentication configuration.
$node_master->init(
$node_primary->init(
allows_streaming => 1,
auth_extra => [ '--create-role', 'repl_role' ]);
$node_master->start;
$node_primary->start;
my $backup_name = 'my_backup';
# Take backup
$node_master->backup($backup_name);
$node_primary->backup($backup_name);
# Create streaming standby linking to master
# Create streaming standby linking to primary
my $node_standby_1 = get_new_node('standby_1');
$node_standby_1->init_from_backup($node_master, $backup_name,
$node_standby_1->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby_1->start;
@ -28,10 +28,10 @@ $node_standby_1->start;
# pg_basebackup works on a standby).
$node_standby_1->backup($backup_name);
# Take a second backup of the standby while the master is offline.
$node_master->stop;
# Take a second backup of the standby while the primary is offline.
$node_primary->stop;
$node_standby_1->backup('my_backup_2');
$node_master->start;
$node_primary->start;
# Create second standby node linking to standby 1
my $node_standby_2 = get_new_node('standby_2');
@ -39,13 +39,13 @@ $node_standby_2->init_from_backup($node_standby_1, $backup_name,
has_streaming => 1);
$node_standby_2->start;
# Create some content on master and check its presence in standby 1
$node_master->safe_psql('postgres',
# Create some content on primary and check its presence in standby 1
$node_primary->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a");
# Wait for standbys to catch up
$node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('insert'));
$node_primary->wait_for_catchup($node_standby_1, 'replay',
$node_primary->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('replay'));
@ -105,57 +105,57 @@ sub test_target_session_attrs
return;
}
# Connect to master in "read-write" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master,
# Connect to primary in "read-write" mode with primary,standby1 list.
test_target_session_attrs($node_primary, $node_standby_1, $node_primary,
"read-write", 0);
# Connect to master in "read-write" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_master,
# Connect to primary in "read-write" mode with standby1,primary list.
test_target_session_attrs($node_standby_1, $node_primary, $node_primary,
"read-write", 0);
# Connect to master in "any" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master, "any",
# Connect to primary in "any" mode with primary,standby1 list.
test_target_session_attrs($node_primary, $node_standby_1, $node_primary, "any",
0);
# Connect to standby1 in "any" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_standby_1,
# Connect to standby1 in "any" mode with standby1,primary list.
test_target_session_attrs($node_standby_1, $node_primary, $node_standby_1,
"any", 0);
# Test for SHOW commands using a WAL sender connection with a replication
# role.
note "testing SHOW commands for replication connection";
$node_master->psql(
$node_primary->psql(
'postgres', "
CREATE ROLE repl_role REPLICATION LOGIN;
GRANT pg_read_all_settings TO repl_role;");
my $master_host = $node_master->host;
my $master_port = $node_master->port;
my $connstr_common = "host=$master_host port=$master_port user=repl_role";
my $primary_host = $node_primary->host;
my $primary_port = $node_primary->port;
my $connstr_common = "host=$primary_host port=$primary_port user=repl_role";
my $connstr_rep = "$connstr_common replication=1";
my $connstr_db = "$connstr_common replication=database dbname=postgres";
# Test SHOW ALL
my ($ret, $stdout, $stderr) = $node_master->psql(
my ($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok($ret == 0, "SHOW ALL with replication role and physical replication");
($ret, $stdout, $stderr) = $node_master->psql(
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW ALL;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
ok($ret == 0, "SHOW ALL with replication role and logical replication");
# Test SHOW with a user-settable parameter
($ret, $stdout, $stderr) = $node_master->psql(
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok( $ret == 0,
"SHOW with user-settable parameter, replication role and physical replication"
);
($ret, $stdout, $stderr) = $node_master->psql(
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW work_mem;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
@ -164,14 +164,14 @@ ok( $ret == 0,
);
# Test SHOW with a superuser-settable parameter
($ret, $stdout, $stderr) = $node_master->psql(
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '-d', $connstr_rep ]);
ok( $ret == 0,
"SHOW with superuser-settable parameter, replication role and physical replication"
);
($ret, $stdout, $stderr) = $node_master->psql(
($ret, $stdout, $stderr) = $node_primary->psql(
'postgres', 'SHOW primary_conninfo;',
on_error_die => 1,
extra_params => [ '-d', $connstr_db ]);
@ -186,13 +186,13 @@ note "switching to physical replication slot";
# standbys. Since we're going to be testing things that affect the slot state,
# also increase the standby feedback interval to ensure timely updates.
my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2');
$node_master->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_master->restart;
is( $node_master->psql(
$node_primary->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_primary->restart;
is( $node_primary->psql(
'postgres',
qq[SELECT pg_create_physical_replication_slot('$slotname_1');]),
0,
'physical slot created on master');
'physical slot created on primary');
$node_standby_1->append_conf('postgresql.conf',
"primary_slot_name = $slotname_1");
$node_standby_1->append_conf('postgresql.conf',
@ -231,7 +231,7 @@ sub get_slot_xmins
# There's no hot standby feedback and there are no logical slots on either peer
# so xmin and catalog_xmin should be null on both slots.
my ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1,
my ($xmin, $catalog_xmin) = get_slot_xmins($node_primary, $slotname_1,
"xmin IS NULL AND catalog_xmin IS NULL");
is($xmin, '', 'xmin of non-cascaded slot null with no hs_feedback');
is($catalog_xmin, '',
@ -244,20 +244,20 @@ is($catalog_xmin, '',
'catalog xmin of cascaded slot null with no hs_feedback');
# Replication still works?
$node_master->safe_psql('postgres', 'CREATE TABLE replayed(val integer);');
$node_primary->safe_psql('postgres', 'CREATE TABLE replayed(val integer);');
sub replay_check
{
my $newval = $node_master->safe_psql('postgres',
my $newval = $node_primary->safe_psql('postgres',
'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
);
$node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('insert'));
$node_primary->wait_for_catchup($node_standby_1, 'replay',
$node_primary->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('replay'));
$node_standby_1->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_1 didn't replay master value $newval";
or die "standby_1 didn't replay primary value $newval";
$node_standby_2->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_2 didn't replay standby_1 value $newval";
@ -278,7 +278,7 @@ $node_standby_2->safe_psql('postgres',
$node_standby_2->reload;
replay_check();
($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1,
($xmin, $catalog_xmin) = get_slot_xmins($node_primary, $slotname_1,
"xmin IS NOT NULL AND catalog_xmin IS NULL");
isnt($xmin, '', 'xmin of non-cascaded slot non-null with hs feedback');
is($catalog_xmin, '',
@ -291,7 +291,7 @@ is($catalog_xmin1, '',
'catalog xmin of cascaded slot still null with hs_feedback');
note "doing some work to advance xmin";
$node_master->safe_psql(
$node_primary->safe_psql(
'postgres', q{
do $$
begin
@ -306,12 +306,12 @@ begin
end$$;
});
$node_master->safe_psql('postgres', 'VACUUM;');
$node_master->safe_psql('postgres', 'CHECKPOINT;');
$node_primary->safe_psql('postgres', 'VACUUM;');
$node_primary->safe_psql('postgres', 'CHECKPOINT;');
my ($xmin2, $catalog_xmin2) =
get_slot_xmins($node_master, $slotname_1, "xmin <> '$xmin'");
note "master slot's new xmin $xmin2, old xmin $xmin";
get_slot_xmins($node_primary, $slotname_1, "xmin <> '$xmin'");
note "primary slot's new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'xmin of non-cascaded slot with hs feedback has changed');
is($catalog_xmin2, '',
'catalog xmin of non-cascaded slot still null with hs_feedback unchanged'
@ -335,7 +335,7 @@ $node_standby_2->safe_psql('postgres',
$node_standby_2->reload;
replay_check();
($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1,
($xmin, $catalog_xmin) = get_slot_xmins($node_primary, $slotname_1,
"xmin IS NULL AND catalog_xmin IS NULL");
is($xmin, '', 'xmin of non-cascaded slot null with hs feedback reset');
is($catalog_xmin, '',
@ -349,55 +349,55 @@ is($catalog_xmin, '',
note "check change primary_conninfo without restart";
$node_standby_2->append_conf('postgresql.conf', "primary_slot_name = ''");
$node_standby_2->enable_streaming($node_master);
$node_standby_2->enable_streaming($node_primary);
$node_standby_2->reload;
# make sure we are not streaming from the cascaded standby
$node_standby_1->stop;
my $newval = $node_master->safe_psql('postgres',
my $newval = $node_primary->safe_psql('postgres',
'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
);
$node_master->wait_for_catchup($node_standby_2, 'replay',
$node_master->lsn('insert'));
$node_primary->wait_for_catchup($node_standby_2, 'replay',
$node_primary->lsn('insert'));
my $is_replayed = $node_standby_2->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval]);
is($is_replayed, qq(1), "standby_2 didn't replay master value $newval");
is($is_replayed, qq(1), "standby_2 didn't replay primary value $newval");
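# Hypothetical follow-up check, not part of this patch: confirm the walreceiver
# picked up the new primary_conninfo after the reload by looking at
# pg_stat_wal_receiver on the standby.
my $receiver_status = $node_standby_2->safe_psql('postgres',
	"SELECT status FROM pg_stat_wal_receiver;");
is($receiver_status, 'streaming', 'walreceiver is streaming from the primary');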
# Drop any existing slots on the primary, for the follow-up tests.
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots;");
# Test physical slot advancing and its durability. Create a new slot on
# the primary, not used by any of the standbys. This reserves WAL at creation.
my $phys_slot = 'phys_slot';
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"SELECT pg_create_physical_replication_slot('$phys_slot', true);");
# Generate some WAL and switch to a new segment, used to check that the
# previous segment gets correctly recycled, as advancing the slot
# recomputes the minimum LSN across all slots.
my $segment_removed = $node_master->safe_psql('postgres',
my $segment_removed = $node_primary->safe_psql('postgres',
'SELECT pg_walfile_name(pg_current_wal_lsn())');
chomp($segment_removed);
$node_master->psql(
$node_primary->psql(
'postgres', "
CREATE TABLE tab_phys_slot (a int);
INSERT INTO tab_phys_slot VALUES (generate_series(1,10));
SELECT pg_switch_wal();");
my $current_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
chomp($current_lsn);
my $psql_rc = $node_master->psql('postgres',
my $psql_rc = $node_primary->psql('postgres',
"SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);"
);
is($psql_rc, '0', 'slot advancing with physical slot');
my $phys_restart_lsn_pre = $node_master->safe_psql('postgres',
my $phys_restart_lsn_pre = $node_primary->safe_psql('postgres',
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
);
chomp($phys_restart_lsn_pre);
# Slot advance should persist across clean restarts.
$node_master->restart;
my $phys_restart_lsn_post = $node_master->safe_psql('postgres',
$node_primary->restart;
my $phys_restart_lsn_post = $node_primary->safe_psql('postgres',
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';"
);
chomp($phys_restart_lsn_post);
@ -406,6 +406,6 @@ ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0,
# Check if the previous segment gets correctly recycled after the
# server stopped cleanly, causing a shutdown checkpoint to be generated.
my $master_data = $node_master->data_dir;
ok(!-f "$master_data/pg_wal/$segment_removed",
my $primary_data = $node_primary->data_dir;
ok(!-f "$primary_data/pg_wal/$segment_removed",
"WAL segment $segment_removed recycled after physical slot advancing");

View File

@ -6,38 +6,38 @@ use TestLib;
use Test::More tests => 3;
use File::Copy;
# Initialize master node, doing archives
my $node_master = get_new_node('master');
$node_master->init(
# Initialize primary node, doing archives
my $node_primary = get_new_node('primary');
$node_primary->init(
has_archiving => 1,
allows_streaming => 1);
my $backup_name = 'my_backup';
# Start it
$node_master->start;
$node_primary->start;
# Take backup for standby
$node_master->backup($backup_name);
$node_primary->backup($backup_name);
# Initialize standby node from backup, fetching WAL from archives
my $node_standby = get_new_node('standby');
$node_standby->init_from_backup($node_master, $backup_name,
$node_standby->init_from_backup($node_primary, $backup_name,
has_restoring => 1);
$node_standby->append_conf('postgresql.conf',
"wal_retrieve_retry_interval = '100ms'");
$node_standby->start;
# Create some content on master
$node_master->safe_psql('postgres',
# Create some content on primary
$node_primary->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a");
my $current_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
# Force archiving of WAL file to make it present on master
$node_master->safe_psql('postgres', "SELECT pg_switch_wal()");
# Force archiving of WAL file to make it present on primary
$node_primary->safe_psql('postgres', "SELECT pg_switch_wal()");
# Add some more content; it should not be present on the standby
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(1001,2000))");
# Wait until necessary replay has been done on standby
@ -60,7 +60,7 @@ is($result, qq(1000), 'check content from archives');
$node_standby->promote;
my $node_standby2 = get_new_node('standby2');
$node_standby2->init_from_backup($node_master, $backup_name,
$node_standby2->init_from_backup($node_primary, $backup_name,
has_restoring => 1);
$node_standby2->start;
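# A minimal sketch, not part of this patch, of the catch-up poll these
# archive-recovery tests rely on: wait until the standby has replayed past a
# previously captured primary LSN.
my $caughtup_query =
	"SELECT '$current_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
$node_standby2->poll_query_until('postgres', $caughtup_query)
  or die "Timed out while waiting for standby2 to catch up";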

View File

@ -13,13 +13,13 @@ sub test_recovery_standby
{
my $test_name = shift;
my $node_name = shift;
my $node_master = shift;
my $node_primary = shift;
my $recovery_params = shift;
my $num_rows = shift;
my $until_lsn = shift;
my $node_standby = get_new_node($node_name);
$node_standby->init_from_backup($node_master, 'my_backup',
$node_standby->init_from_backup($node_primary, 'my_backup',
has_restoring => 1);
foreach my $param_item (@$recovery_params)
@ -35,7 +35,7 @@ sub test_recovery_standby
$node_standby->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for standby to catch up";
# Create some content on master and check its presence in standby
# Create some content on primary and check its presence in standby
my $result =
$node_standby->safe_psql('postgres', "SELECT count(*) FROM tab_int");
is($result, qq($num_rows), "check standby content for $test_name");
@ -46,74 +46,74 @@ sub test_recovery_standby
return;
}
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(has_archiving => 1, allows_streaming => 1);
# Initialize primary node
my $node_primary = get_new_node('primary');
$node_primary->init(has_archiving => 1, allows_streaming => 1);
# Start it
$node_master->start;
$node_primary->start;
# Create data before taking the backup, aimed at testing
# recovery_target = 'immediate'
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a");
my $lsn1 =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
# Take backup from which all operations will be run
$node_master->backup('my_backup');
$node_primary->backup('my_backup');
# Insert some data to be used as a replay reference, with a recovery
# target TXID.
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(1001,2000))");
my $ret = $node_master->safe_psql('postgres',
my $ret = $node_primary->safe_psql('postgres',
"SELECT pg_current_wal_lsn(), pg_current_xact_id();");
my ($lsn2, $recovery_txid) = split /\|/, $ret;
# More data, with recovery target timestamp
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(2001,3000))");
my $lsn3 =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
my $recovery_time = $node_master->safe_psql('postgres', "SELECT now()");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
my $recovery_time = $node_primary->safe_psql('postgres', "SELECT now()");
# Even more data, this time with a recovery target name
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(3001,4000))");
my $recovery_name = "my_target";
my $lsn4 =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_primary->safe_psql('postgres',
"SELECT pg_create_restore_point('$recovery_name');");
# And now for a recovery target LSN
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(4001,5000))");
my $lsn5 = my $recovery_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(5001,6000))");
# Force archiving of WAL file
$node_master->safe_psql('postgres', "SELECT pg_switch_wal()");
$node_primary->safe_psql('postgres', "SELECT pg_switch_wal()");
# Test recovery targets
my @recovery_params = ("recovery_target = 'immediate'");
test_recovery_standby('immediate target',
'standby_1', $node_master, \@recovery_params, "1000", $lsn1);
'standby_1', $node_primary, \@recovery_params, "1000", $lsn1);
@recovery_params = ("recovery_target_xid = '$recovery_txid'");
test_recovery_standby('XID', 'standby_2', $node_master, \@recovery_params,
test_recovery_standby('XID', 'standby_2', $node_primary, \@recovery_params,
"2000", $lsn2);
@recovery_params = ("recovery_target_time = '$recovery_time'");
test_recovery_standby('time', 'standby_3', $node_master, \@recovery_params,
test_recovery_standby('time', 'standby_3', $node_primary, \@recovery_params,
"3000", $lsn3);
@recovery_params = ("recovery_target_name = '$recovery_name'");
test_recovery_standby('name', 'standby_4', $node_master, \@recovery_params,
test_recovery_standby('name', 'standby_4', $node_primary, \@recovery_params,
"4000", $lsn4);
@recovery_params = ("recovery_target_lsn = '$recovery_lsn'");
test_recovery_standby('LSN', 'standby_5', $node_master, \@recovery_params,
test_recovery_standby('LSN', 'standby_5', $node_primary, \@recovery_params,
"5000", $lsn5);
# Multiple targets
@ -127,10 +127,10 @@ test_recovery_standby('LSN', 'standby_5', $node_master, \@recovery_params,
"recovery_target_name = ''",
"recovery_target_time = '$recovery_time'");
test_recovery_standby('multiple overriding settings',
'standby_6', $node_master, \@recovery_params, "3000", $lsn3);
'standby_6', $node_primary, \@recovery_params, "3000", $lsn3);
my $node_standby = get_new_node('standby_7');
$node_standby->init_from_backup($node_master, 'my_backup',
$node_standby->init_from_backup($node_primary, 'my_backup',
has_restoring => 1);
$node_standby->append_conf(
'postgresql.conf', "recovery_target_name = '$recovery_name'
@ -151,7 +151,7 @@ ok($logfile =~ qr/multiple recovery targets specified/,
$node_standby = get_new_node('standby_8');
$node_standby->init_from_backup(
$node_master, 'my_backup',
$node_primary, 'my_backup',
has_restoring => 1,
standby => 0);
$node_standby->append_conf('postgresql.conf',

View File

@ -10,35 +10,35 @@ use Test::More tests => 2;
$ENV{PGDATABASE} = 'postgres';
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->start;
# Initialize primary node
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_primary->start;
# Take backup
my $backup_name = 'my_backup';
$node_master->backup($backup_name);
$node_primary->backup($backup_name);
# Create two standbys linking to it
my $node_standby_1 = get_new_node('standby_1');
$node_standby_1->init_from_backup($node_master, $backup_name,
$node_standby_1->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby_1->start;
my $node_standby_2 = get_new_node('standby_2');
$node_standby_2->init_from_backup($node_master, $backup_name,
$node_standby_2->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby_2->start;
# Create some content on master
$node_master->safe_psql('postgres',
# Create some content on primary
$node_primary->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a");
# Wait until standby 1 has replayed enough data
$node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('write'));
$node_primary->wait_for_catchup($node_standby_1, 'replay',
$node_primary->lsn('write'));
# Stop and remove master
$node_master->teardown_node;
# Stop and remove primary
$node_primary->teardown_node;
# promote standby 1 using "pg_promote", switching it to a new timeline
my $psql_out = '';
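# The promotion call itself sits outside this hunk; a plausible shape for it,
# shown here as a sketch only, uses the SQL-level pg_promote() function and
# checks that it reports success:
$node_standby_1->psql('postgres', 'SELECT pg_promote(wait => true)',
	stdout => \$psql_out);
is($psql_out, 't', 'promotion of standby 1 with pg_promote');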

View File

@ -6,23 +6,23 @@ use PostgresNode;
use TestLib;
use Test::More tests => 1;
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->start;
# Initialize primary node
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_primary->start;
# And some content
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1, 10) AS a");
# Take backup
my $backup_name = 'my_backup';
$node_master->backup($backup_name);
$node_primary->backup($backup_name);
# Create streaming standby from backup
my $node_standby = get_new_node('standby');
my $delay = 3;
$node_standby->init_from_backup($node_master, $backup_name,
$node_standby->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby->append_conf(
'postgresql.conf', qq(
@ -30,19 +30,19 @@ recovery_min_apply_delay = '${delay}s'
));
$node_standby->start;
# Make new content on master and check its presence in standby depending
# Make new content on primary and check its presence in standby depending
# on the delay applied above. Before doing the insertion, get the
# current timestamp that will be used as a comparison base. Even on slow
# machines, this keeps the behavior predictable when comparing the
# delay between data insertion moment on master and replay time on standby.
my $master_insert_time = time();
$node_master->safe_psql('postgres',
# delay between data insertion moment on primary and replay time on standby.
my $primary_insert_time = time();
$node_primary->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(11, 20))");
# Now wait for replay to complete on standby. We're done waiting when the
# standby has replayed up to the previously saved master LSN.
# standby has replayed up to the previously saved primary LSN.
my $until_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
$node_standby->poll_query_until('postgres',
"SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0")
@ -50,5 +50,5 @@ $node_standby->poll_query_until('postgres',
# This test is successful if and only if the LSN has been applied with at least
# the configured apply delay.
ok(time() - $master_insert_time >= $delay,
ok(time() - $primary_insert_time >= $delay,
"standby applies WAL only after replication delay");

View File

@ -10,25 +10,25 @@ use TestLib;
use Test::More tests => 14;
use Config;
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->append_conf(
# Initialize primary node
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_primary->append_conf(
'postgresql.conf', qq(
wal_level = logical
));
$node_master->start;
my $backup_name = 'master_backup';
$node_primary->start;
my $backup_name = 'primary_backup';
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
qq[CREATE TABLE decoding_test(x integer, y text);]);
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]
);
# Cover walsender error shutdown code
my ($result, $stdout, $stderr) = $node_master->psql(
my ($result, $stdout, $stderr) = $node_primary->psql(
'template1',
qq[START_REPLICATION SLOT test_slot LOGICAL 0/0],
replication => 'database');
@ -38,19 +38,19 @@ ok( $stderr =~
# Check case of walsender not using a database connection. Logical
# decoding should not be allowed.
($result, $stdout, $stderr) = $node_master->psql(
($result, $stdout, $stderr) = $node_primary->psql(
'template1',
qq[START_REPLICATION SLOT s1 LOGICAL 0/1],
replication => 'true');
ok($stderr =~ /ERROR: logical decoding requires a database connection/,
"Logical decoding fails on non-database connection");
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]
);
# Basic decoding works
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
is(scalar(my @foobar = split /^/m, $result),
12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
@ -58,17 +58,17 @@ is(scalar(my @foobar = split /^/m, $result),
# If we immediately crash the server we might lose the progress we just made
# and replay the same changes again. But a clean shutdown should never repeat
# the same changes when we use the SQL decoding interface.
$node_master->restart('fast');
$node_primary->restart('fast');
# There are no new writes, so the result should be empty.
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
chomp($result);
is($result, '', 'Decoding after fast restart repeats no rows');
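# Illustrative aside, not part of this patch: a clean restart repeats nothing
# because pg_logical_slot_get_changes() advances the slot's confirmed_flush_lsn,
# which survives a clean shutdown.
my $slot_lsns_ok = $node_primary->safe_psql('postgres',
	"SELECT confirmed_flush_lsn >= restart_lsn FROM pg_replication_slots WHERE slot_name = 'test_slot';");
is($slot_lsns_ok, 't', 'slot keeps a confirmed_flush_lsn at or past restart_lsn');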
# Insert some rows and verify that we get the same results from pg_recvlogical
# and the SQL interface.
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]
);
@ -79,22 +79,22 @@ table public.decoding_test: INSERT: x[integer]:3 y[text]:'3'
table public.decoding_test: INSERT: x[integer]:4 y[text]:'4'
COMMIT};
my $stdout_sql = $node_master->safe_psql('postgres',
my $stdout_sql = $node_primary->safe_psql('postgres',
qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
);
is($stdout_sql, $expected, 'got expected output from SQL decoding session');
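# Peeking does not consume changes, so repeating the call returns the same rows
# (a sanity-style sketch, not part of this patch).
my $stdout_sql_again = $node_primary->safe_psql('postgres',
	qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
);
is($stdout_sql_again, $expected, 'peeking twice returns the same changes');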
my $endpos = $node_master->safe_psql('postgres',
my $endpos = $node_primary->safe_psql('postgres',
"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
);
print "waiting to replay $endpos\n";
# Insert some rows after $endpos, which we won't read.
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(5,50) s;]
);
my $stdout_recv = $node_master->pg_recvlogical_upto(
my $stdout_recv = $node_primary->pg_recvlogical_upto(
'postgres', 'test_slot', $endpos, 180,
'include-xids' => '0',
'skip-empty-xacts' => '1');
@ -102,27 +102,27 @@ chomp($stdout_recv);
is($stdout_recv, $expected,
'got same expected output from pg_recvlogical decoding session');
$node_master->poll_query_until('postgres',
$node_primary->poll_query_until('postgres',
"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'test_slot' AND active_pid IS NULL)"
) or die "slot never became inactive";
$stdout_recv = $node_master->pg_recvlogical_upto(
$stdout_recv = $node_primary->pg_recvlogical_upto(
'postgres', 'test_slot', $endpos, 180,
'include-xids' => '0',
'skip-empty-xacts' => '1');
chomp($stdout_recv);
is($stdout_recv, '', 'pg_recvlogical acknowledged changes');
$node_master->safe_psql('postgres', 'CREATE DATABASE otherdb');
$node_primary->safe_psql('postgres', 'CREATE DATABASE otherdb');
is( $node_master->psql(
is( $node_primary->psql(
'otherdb',
"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
),
3,
'replaying logical slot from another database fails');
$node_master->safe_psql('otherdb',
$node_primary->safe_psql('otherdb',
qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]
);
@ -135,51 +135,51 @@ SKIP:
my $pg_recvlogical = IPC::Run::start(
[
'pg_recvlogical', '-d', $node_master->connstr('otherdb'),
'pg_recvlogical', '-d', $node_primary->connstr('otherdb'),
'-S', 'otherdb_slot', '-f', '-', '--start'
]);
$node_master->poll_query_until('otherdb',
$node_primary->poll_query_until('otherdb',
"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"
) or die "slot never became active";
is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
is($node_primary->psql('postgres', 'DROP DATABASE otherdb'),
3, 'dropping a DB with active logical slots fails');
$pg_recvlogical->kill_kill;
is($node_master->slot('otherdb_slot')->{'slot_name'},
is($node_primary->slot('otherdb_slot')->{'slot_name'},
undef, 'logical slot still exists');
}
$node_master->poll_query_until('otherdb',
$node_primary->poll_query_until('otherdb',
"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)"
) or die "slot never became inactive";
is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
is($node_primary->psql('postgres', 'DROP DATABASE otherdb'),
0, 'dropping a DB with inactive logical slots succeeds');
is($node_master->slot('otherdb_slot')->{'slot_name'},
is($node_primary->slot('otherdb_slot')->{'slot_name'},
undef, 'logical slot was actually dropped with DB');
# Test logical slot advancing and its durability.
my $logical_slot = 'logical_slot';
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"SELECT pg_create_logical_replication_slot('$logical_slot', 'test_decoding', false);"
);
$node_master->psql(
$node_primary->psql(
'postgres', "
CREATE TABLE tab_logical_slot (a int);
INSERT INTO tab_logical_slot VALUES (generate_series(1,10));");
my $current_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
chomp($current_lsn);
my $psql_rc = $node_master->psql('postgres',
my $psql_rc = $node_primary->psql('postgres',
"SELECT pg_replication_slot_advance('$logical_slot', '$current_lsn'::pg_lsn);"
);
is($psql_rc, '0', 'slot advancing with logical slot');
my $logical_restart_lsn_pre = $node_master->safe_psql('postgres',
my $logical_restart_lsn_pre = $node_primary->safe_psql('postgres',
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"
);
chomp($logical_restart_lsn_pre);
# Slot advance should persist across clean restarts.
$node_master->restart;
my $logical_restart_lsn_post = $node_master->safe_psql('postgres',
$node_primary->restart;
my $logical_restart_lsn_post = $node_primary->safe_psql('postgres',
"SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';"
);
chomp($logical_restart_lsn_post);
@ -187,4 +187,4 @@ ok(($logical_restart_lsn_pre cmp $logical_restart_lsn_post) == 0,
"logical slot advance persists across restarts");
# done with the node
$node_master->stop;
$node_primary->stop;

View File

@ -32,53 +32,53 @@ sub test_sync_state
# until the standby is confirmed as registered.
sub start_standby_and_wait
{
my ($master, $standby) = @_;
my $master_name = $master->name;
my ($primary, $standby) = @_;
my $primary_name = $primary->name;
my $standby_name = $standby->name;
my $query =
"SELECT count(1) = 1 FROM pg_stat_replication WHERE application_name = '$standby_name'";
$standby->start;
print("### Waiting for standby \"$standby_name\" on \"$master_name\"\n");
$master->poll_query_until('postgres', $query);
print("### Waiting for standby \"$standby_name\" on \"$primary_name\"\n");
$primary->poll_query_until('postgres', $query);
return;
}
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->start;
my $backup_name = 'master_backup';
# Initialize primary node
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_primary->start;
my $backup_name = 'primary_backup';
# Take backup
$node_master->backup($backup_name);
$node_primary->backup($backup_name);
# Create all the standbys. Their status on the primary is checked to ensure
# the ordering of each one of them in the WAL sender array of the primary.
# Create standby1 linking to master
# Create standby1 linking to primary
my $node_standby_1 = get_new_node('standby1');
$node_standby_1->init_from_backup($node_master, $backup_name,
$node_standby_1->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
start_standby_and_wait($node_master, $node_standby_1);
start_standby_and_wait($node_primary, $node_standby_1);
# Create standby2 linking to master
# Create standby2 linking to primary
my $node_standby_2 = get_new_node('standby2');
$node_standby_2->init_from_backup($node_master, $backup_name,
$node_standby_2->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
start_standby_and_wait($node_master, $node_standby_2);
start_standby_and_wait($node_primary, $node_standby_2);
# Create standby3 linking to master
# Create standby3 linking to primary
my $node_standby_3 = get_new_node('standby3');
$node_standby_3->init_from_backup($node_master, $backup_name,
$node_standby_3->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
start_standby_and_wait($node_master, $node_standby_3);
start_standby_and_wait($node_primary, $node_standby_3);
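# test_sync_state() is defined above this hunk; based on its call sites below,
# a plausible sketch (the real helper may set and poll things differently) is:
# apply a synchronous_standby_names value, reload, and compare the standbys'
# sync_priority/sync_state as reported by pg_stat_replication.
sub test_sync_state_sketch
{
	my ($node, $expected, $msg, $setting) = @_;
	if (defined $setting)
	{
		$node->append_conf('postgresql.conf',
			"synchronous_standby_names = '$setting'");
		$node->reload;
	}
	my $result = $node->safe_psql('postgres',
		"SELECT application_name, sync_priority, sync_state"
		  . " FROM pg_stat_replication ORDER BY application_name;");
	is($result, $expected, $msg);
	return;
}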
# Check that sync_state is determined correctly when
# synchronous_standby_names is specified in old syntax.
test_sync_state(
$node_master, qq(standby1|1|sync
$node_primary, qq(standby1|1|sync
standby2|2|potential
standby3|0|async),
'old syntax of synchronous_standby_names',
@ -90,7 +90,7 @@ standby3|0|async),
# it's stored in the head of WalSnd array which manages
# all the standbys though they have the same priority.
test_sync_state(
$node_master, qq(standby1|1|sync
$node_primary, qq(standby1|1|sync
standby2|1|potential
standby3|1|potential),
'asterisk in synchronous_standby_names',
@ -105,23 +105,23 @@ $node_standby_3->stop;
# Make sure that each standby reports back to the primary in the wanted
# order.
start_standby_and_wait($node_master, $node_standby_2);
start_standby_and_wait($node_master, $node_standby_3);
start_standby_and_wait($node_primary, $node_standby_2);
start_standby_and_wait($node_primary, $node_standby_3);
# Specify 2 as the number of sync standbys.
# Check that two standbys are in 'sync' state.
test_sync_state(
$node_master, qq(standby2|2|sync
$node_primary, qq(standby2|2|sync
standby3|3|sync),
'2 synchronous standbys',
'2(standby1,standby2,standby3)');
# Start standby1
start_standby_and_wait($node_master, $node_standby_1);
start_standby_and_wait($node_primary, $node_standby_1);
# Create standby4 linking to master
# Create standby4 linking to primary
my $node_standby_4 = get_new_node('standby4');
$node_standby_4->init_from_backup($node_master, $backup_name,
$node_standby_4->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby_4->start;
@ -130,7 +130,7 @@ $node_standby_4->start;
# standby3 appearing later represents potential, and standby4 is
# in 'async' state because it's not in the list.
test_sync_state(
$node_master, qq(standby1|1|sync
$node_primary, qq(standby1|1|sync
standby2|2|sync
standby3|3|potential
standby4|0|async),
@ -140,7 +140,7 @@ standby4|0|async),
# when num_sync exceeds the number of names of potential sync standbys
# specified in synchronous_standby_names.
test_sync_state(
$node_master, qq(standby1|0|async
$node_primary, qq(standby1|0|async
standby2|4|sync
standby3|3|sync
standby4|1|sync),
@ -154,7 +154,7 @@ standby4|1|sync),
# second standby listed first in the WAL sender array, which is
# standby2 in this case.
test_sync_state(
$node_master, qq(standby1|1|sync
$node_primary, qq(standby1|1|sync
standby2|2|sync
standby3|2|potential
standby4|2|potential),
@ -164,7 +164,7 @@ standby4|2|potential),
# Check that the setting of '2(*)' chooses standby2 and standby3 that are stored
# earlier in WalSnd array as sync standbys.
test_sync_state(
$node_master, qq(standby1|1|potential
$node_primary, qq(standby1|1|potential
standby2|1|sync
standby3|1|sync
standby4|1|potential),
@ -177,7 +177,7 @@ $node_standby_3->stop;
# Check that the state of standby1 stored earlier in WalSnd array than
# standby4 is transited from potential to sync.
test_sync_state(
$node_master, qq(standby1|1|sync
$node_primary, qq(standby1|1|sync
standby2|1|sync
standby4|1|potential),
'potential standby found earlier in array is promoted to sync');
@ -185,7 +185,7 @@ standby4|1|potential),
# Check that standby1 and standby2 are chosen as sync standbys
# based on their priorities.
test_sync_state(
$node_master, qq(standby1|1|sync
$node_primary, qq(standby1|1|sync
standby2|2|sync
standby4|0|async),
'priority-based sync replication specified by FIRST keyword',
@ -194,7 +194,7 @@ standby4|0|async),
# Check that all the listed standbys are considered as candidates
# for sync standbys in a quorum-based sync replication.
test_sync_state(
$node_master, qq(standby1|1|quorum
$node_primary, qq(standby1|1|quorum
standby2|1|quorum
standby4|0|async),
'2 quorum and 1 async',
@ -206,7 +206,7 @@ $node_standby_3->start;
# Check that the setting of 'ANY 2(*)' chooses all standbys as
# candidates for quorum sync standbys.
test_sync_state(
$node_master, qq(standby1|1|quorum
$node_primary, qq(standby1|1|quorum
standby2|1|quorum
standby3|1|quorum
standby4|1|quorum),

View File

@ -9,10 +9,10 @@ use PostgresNode;
use TestLib;
use Test::More tests => 1;
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_master->append_conf(
$node_primary->append_conf(
'postgresql.conf', qq{
fsync = on
wal_log_hints = on
@ -20,17 +20,17 @@ max_prepared_transactions = 5
autovacuum = off
});
# Create a master node and its standby, initializing both with some data
# Create a primary node and its standby, initializing both with some data
# at the same time.
$node_master->start;
$node_primary->start;
$node_master->backup('master_backup');
$node_primary->backup('primary_backup');
my $node_standby = get_new_node('standby');
$node_standby->init_from_backup($node_master, 'master_backup',
$node_standby->init_from_backup($node_primary, 'primary_backup',
has_streaming => 1);
$node_standby->start;
$node_master->psql(
$node_primary->psql(
'postgres', qq{
create table testtab (a int, b char(100));
insert into testtab select generate_series(1,1000), 'foo';
@ -39,7 +39,7 @@ delete from testtab where ctid > '(8,0)';
});
# Take a lock on the table to prevent following vacuum from truncating it
$node_master->psql(
$node_primary->psql(
'postgres', qq{
begin;
lock table testtab in row share mode;
@ -47,14 +47,14 @@ prepare transaction 'p1';
});
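# The prepared transaction keeps its ROW SHARE lock on testtab even though no
# session is attached to it; an illustrative way to observe that (not part of
# the patch) is to look for the lock in pg_locks.
my $lock_held = $node_primary->safe_psql('postgres',
	"SELECT count(*) > 0 FROM pg_locks l JOIN pg_class c ON c.oid = l.relation"
	  . " WHERE c.relname = 'testtab' AND l.mode = 'RowShareLock';");
is($lock_held, 't', 'prepared transaction still holds the table lock');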
# Vacuum, update FSM without truncation
$node_master->psql('postgres', 'vacuum verbose testtab');
$node_primary->psql('postgres', 'vacuum verbose testtab');
# Force a checkpoint
$node_master->psql('postgres', 'checkpoint');
$node_primary->psql('postgres', 'checkpoint');
# Now do some more inserts/deletes and another vacuum to ensure full-page
# writes are done
$node_master->psql(
$node_primary->psql(
'postgres', qq{
insert into testtab select generate_series(1,1000), 'foo';
delete from testtab where ctid > '(8,0)';
@ -65,15 +65,15 @@ vacuum verbose testtab;
$node_standby->psql('postgres', 'checkpoint');
# Release the lock, vacuum again which should lead to truncation
$node_master->psql(
$node_primary->psql(
'postgres', qq{
rollback prepared 'p1';
vacuum verbose testtab;
});
$node_master->psql('postgres', 'checkpoint');
$node_primary->psql('postgres', 'checkpoint');
my $until_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
# Wait long enough for standby to receive and apply all WAL
my $caughtup_query =

View File

@ -23,7 +23,7 @@ sub configure_and_reload
return;
}
# Set up two nodes, which will alternately be master and replication standby.
# Set up two nodes, which will alternately be primary and replication standby.
# Setup london node
my $node_london = get_new_node("london");
@ -46,13 +46,13 @@ $node_paris->start;
configure_and_reload($node_london, "synchronous_standby_names = 'paris'");
configure_and_reload($node_paris, "synchronous_standby_names = 'london'");
# Set up nonce names for current master and standby nodes
note "Initially, london is master and paris is standby";
my ($cur_master, $cur_standby) = ($node_london, $node_paris);
my $cur_master_name = $cur_master->name;
# Set up nonce names for current primary and standby nodes
note "Initially, london is primary and paris is standby";
my ($cur_primary, $cur_standby) = ($node_london, $node_paris);
my $cur_primary_name = $cur_primary->name;
# Create table we'll use in the test transactions
$cur_master->psql('postgres', "CREATE TABLE t_009_tbl (id int, msg text)");
$cur_primary->psql('postgres', "CREATE TABLE t_009_tbl (id int, msg text)");
###############################################################################
# Check that we can commit and abort transaction after soft restart.
@ -61,25 +61,25 @@ $cur_master->psql('postgres', "CREATE TABLE t_009_tbl (id int, msg text)");
# files.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (1, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (1, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (2, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (2, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_1';
BEGIN;
INSERT INTO t_009_tbl VALUES (3, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (3, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (4, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (4, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_2';");
$cur_master->stop;
$cur_master->start;
$cur_primary->stop;
$cur_primary->start;
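# Illustrative aside, not part of this patch: at this point both prepared
# transactions survived the soft restart and are listed in pg_prepared_xacts.
my $gid_count = $cur_primary->safe_psql('postgres',
	"SELECT count(*) FROM pg_prepared_xacts;");
is($gid_count, '2', 'both prepared transactions survived the soft restart');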
$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
is($psql_rc, '0', 'Commit prepared transaction after restart');
$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_2'");
$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_2'");
is($psql_rc, '0', 'Rollback prepared transaction after restart');
###############################################################################
@ -88,50 +88,50 @@ is($psql_rc, '0', 'Rollback prepared transaction after restart');
# transaction using dedicated WAL records.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (5, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (5, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (6, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (6, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_3';
BEGIN;
INSERT INTO t_009_tbl VALUES (7, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (7, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (8, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (8, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_4';");
$cur_master->teardown_node;
$cur_master->start;
$cur_primary->teardown_node;
$cur_primary->start;
$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_3'");
$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_3'");
is($psql_rc, '0', 'Commit prepared transaction after teardown');
$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_4'");
$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_4'");
is($psql_rc, '0', 'Rollback prepared transaction after teardown');
###############################################################################
# Check that WAL replay can handle several transactions with same GID name.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (9, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (9, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (10, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (10, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_5';
COMMIT PREPARED 'xact_009_5';
BEGIN;
INSERT INTO t_009_tbl VALUES (11, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (11, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (12, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (12, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_5';");
$cur_master->teardown_node;
$cur_master->start;
$cur_primary->teardown_node;
$cur_primary->start;
$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_5'");
$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_5'");
is($psql_rc, '0', 'Replay several transactions with same GID');
###############################################################################
@ -139,39 +139,39 @@ is($psql_rc, '0', 'Replay several transactions with same GID');
# while replaying transaction commits.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (13, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (13, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (14, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (14, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_6';
COMMIT PREPARED 'xact_009_6';");
$cur_master->teardown_node;
$cur_master->start;
$psql_rc = $cur_master->psql(
$cur_primary->teardown_node;
$cur_primary->start;
$psql_rc = $cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (15, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (15, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (16, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (16, 'issued to ${cur_primary_name}');
-- This prepare can fail due to a conflicting GID or lock conflicts if
-- replay did not fully clean up its state on the previous commit.
PREPARE TRANSACTION 'xact_009_7';");
is($psql_rc, '0', "Cleanup of shared memory state for 2PC commit");
$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_7'");
$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_7'");
###############################################################################
# Check that WAL replay will cleanup its shared memory state on running standby.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (17, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (17, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (18, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (18, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_8';
COMMIT PREPARED 'xact_009_8';");
$cur_standby->psql(
@ -186,15 +186,15 @@ is($psql_out, '0',
# prepare and commit to use on-disk twophase files.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (19, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (19, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (20, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (20, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_9';");
$cur_standby->psql('postgres', "CHECKPOINT");
$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_9'");
$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_9'");
$cur_standby->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
@ -206,114 +206,114 @@ is($psql_out, '0',
# Check that prepared transactions can be committed on promoted standby.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (21, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (21, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (22, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (22, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_10';");
$cur_master->teardown_node;
$cur_primary->teardown_node;
$cur_standby->promote;
# change roles
note "Now paris is master and london is standby";
($cur_master, $cur_standby) = ($node_paris, $node_london);
$cur_master_name = $cur_master->name;
note "Now paris is primary and london is standby";
($cur_primary, $cur_standby) = ($node_paris, $node_london);
$cur_primary_name = $cur_primary->name;
# because london is not running at this point, we can't use syncrep commit
# on this command
$psql_rc = $cur_master->psql('postgres',
$psql_rc = $cur_primary->psql('postgres',
"SET synchronous_commit = off; COMMIT PREPARED 'xact_009_10'");
is($psql_rc, '0', "Restore of prepared transaction on promoted standby");
# restart old master as new standby
$cur_standby->enable_streaming($cur_master);
# restart old primary as new standby
$cur_standby->enable_streaming($cur_primary);
$cur_standby->start;
###############################################################################
# Check that prepared transactions are replayed after soft restart of standby
# while master is down. Since standby knows that master is down it uses a
# while primary is down. Since standby knows that primary is down it uses a
# different code path on startup to ensure that the status of transactions is
# consistent.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (23, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (23, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (24, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (24, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_11';");
$cur_master->stop;
$cur_primary->stop;
$cur_standby->restart;
$cur_standby->promote;
# change roles
note "Now london is master and paris is standby";
($cur_master, $cur_standby) = ($node_london, $node_paris);
$cur_master_name = $cur_master->name;
note "Now london is primary and paris is standby";
($cur_primary, $cur_standby) = ($node_london, $node_paris);
$cur_primary_name = $cur_primary->name;
$cur_master->psql(
$cur_primary->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '1',
"Restore prepared transactions from files with master down");
"Restore prepared transactions from files with primary down");
# restart old master as new standby
$cur_standby->enable_streaming($cur_master);
# restart old primary as new standby
$cur_standby->enable_streaming($cur_primary);
$cur_standby->start;
$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_11'");
$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_11'");
###############################################################################
# Check that prepared transactions are correctly replayed after standby hard
# restart while master is down.
# restart while primary is down.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (25, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (25, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (26, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl VALUES (26, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_12';
");
$cur_master->stop;
$cur_primary->stop;
$cur_standby->teardown_node;
$cur_standby->start;
$cur_standby->promote;
# change roles
note "Now paris is master and london is standby";
($cur_master, $cur_standby) = ($node_paris, $node_london);
$cur_master_name = $cur_master->name;
note "Now paris is primary and london is standby";
($cur_primary, $cur_standby) = ($node_paris, $node_london);
$cur_primary_name = $cur_primary->name;
$cur_master->psql(
$cur_primary->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '1',
"Restore prepared transactions from records with master down");
"Restore prepared transactions from records with primary down");
# restart old master as new standby
$cur_standby->enable_streaming($cur_master);
# restart old primary as new standby
$cur_standby->enable_streaming($cur_primary);
$cur_standby->start;
$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_12'");
$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_12'");
###############################################################################
# Check for a lock conflict between prepared transaction with DDL inside and
# replay of XLOG_STANDBY_LOCK wal record.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
CREATE TABLE t_009_tbl2 (id int, msg text);
SAVEPOINT s1;
INSERT INTO t_009_tbl2 VALUES (27, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl2 VALUES (27, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_13';
-- checkpoint will issue XLOG_STANDBY_LOCK that can conflict with lock
-- held by 'create table' statement
@ -321,10 +321,10 @@ $cur_master->psql(
COMMIT PREPARED 'xact_009_13';");
# Ensure that last transaction is replayed on standby.
my $cur_master_lsn =
$cur_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $cur_primary_lsn =
$cur_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $caughtup_query =
"SELECT '$cur_master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
"SELECT '$cur_primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
$cur_standby->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for standby to catch up";
@ -336,69 +336,69 @@ is($psql_out, '1', "Replay prepared transaction with DDL");
###############################################################################
# Check recovery of prepared transaction with DDL inside after a hard restart
# of the master.
# of the primary.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
CREATE TABLE t_009_tbl3 (id int, msg text);
SAVEPOINT s1;
INSERT INTO t_009_tbl3 VALUES (28, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl3 VALUES (28, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_14';
BEGIN;
CREATE TABLE t_009_tbl4 (id int, msg text);
SAVEPOINT s1;
INSERT INTO t_009_tbl4 VALUES (29, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl4 VALUES (29, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_15';");
$cur_master->teardown_node;
$cur_master->start;
$cur_primary->teardown_node;
$cur_primary->start;
$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_14'");
$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_14'");
is($psql_rc, '0', 'Commit prepared transaction after teardown');
$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_15'");
$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_15'");
is($psql_rc, '0', 'Rollback prepared transaction after teardown');
###############################################################################
# Check recovery of prepared transaction with DDL inside after a soft restart
# of the master.
# of the primary.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres', "
BEGIN;
CREATE TABLE t_009_tbl5 (id int, msg text);
SAVEPOINT s1;
INSERT INTO t_009_tbl5 VALUES (30, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl5 VALUES (30, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_16';
BEGIN;
CREATE TABLE t_009_tbl6 (id int, msg text);
SAVEPOINT s1;
INSERT INTO t_009_tbl6 VALUES (31, 'issued to ${cur_master_name}');
INSERT INTO t_009_tbl6 VALUES (31, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_17';");
$cur_master->stop;
$cur_master->start;
$cur_primary->stop;
$cur_primary->start;
$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_16'");
$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_16'");
is($psql_rc, '0', 'Commit prepared transaction after restart');
$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_17'");
$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_17'");
is($psql_rc, '0', 'Rollback prepared transaction after restart');
###############################################################################
# Verify expected data appears on both servers.
###############################################################################
$cur_master->psql(
$cur_primary->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '0', "No uncommitted prepared transactions on master");
is($psql_out, '0', "No uncommitted prepared transactions on primary");
$cur_master->psql(
$cur_primary->psql(
'postgres',
"SELECT * FROM t_009_tbl ORDER BY id",
stdout => \$psql_out);
@ -424,15 +424,15 @@ is( $psql_out, qq{1|issued to london
24|issued to paris
25|issued to london
26|issued to london},
"Check expected t_009_tbl data on master");
"Check expected t_009_tbl data on primary");
$cur_master->psql(
$cur_primary->psql(
'postgres',
"SELECT * FROM t_009_tbl2",
stdout => \$psql_out);
is( $psql_out,
qq{27|issued to paris},
"Check expected t_009_tbl2 data on master");
"Check expected t_009_tbl2 data on primary");
$cur_standby->psql(
'postgres',

View File

@ -30,10 +30,10 @@ use Scalar::Util qw(blessed);
my ($stdout, $stderr, $ret);
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1, has_archiving => 1);
$node_master->append_conf(
# Initialize primary node
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1, has_archiving => 1);
$node_primary->append_conf(
'postgresql.conf', q[
wal_level = 'logical'
max_replication_slots = 3
@ -42,38 +42,38 @@ log_min_messages = 'debug2'
hot_standby_feedback = on
wal_receiver_status_interval = 1
]);
$node_master->dump_info;
$node_master->start;
$node_primary->dump_info;
$node_primary->start;
note "testing logical timeline following with a filesystem-level copy";
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"SELECT pg_create_logical_replication_slot('before_basebackup', 'test_decoding');"
);
$node_master->safe_psql('postgres', "CREATE TABLE decoding(blah text);");
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres', "CREATE TABLE decoding(blah text);");
$node_primary->safe_psql('postgres',
"INSERT INTO decoding(blah) VALUES ('beforebb');");
# We also want to verify that DROP DATABASE on a standby with a logical
# slot works. This isn't strictly related to timeline following, but
# the only way to get a logical slot on a standby right now is to use
# the same physical copy trick, so:
$node_master->safe_psql('postgres', 'CREATE DATABASE dropme;');
$node_master->safe_psql('dropme',
$node_primary->safe_psql('postgres', 'CREATE DATABASE dropme;');
$node_primary->safe_psql('dropme',
"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');"
);
$node_master->safe_psql('postgres', 'CHECKPOINT;');
$node_primary->safe_psql('postgres', 'CHECKPOINT;');
my $backup_name = 'b1';
$node_master->backup_fs_hot($backup_name);
$node_primary->backup_fs_hot($backup_name);
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
q[SELECT pg_create_physical_replication_slot('phys_slot');]);
my $node_replica = get_new_node('replica');
$node_replica->init_from_backup(
$node_master, $backup_name,
$node_primary, $backup_name,
has_streaming => 1,
has_restoring => 1);
$node_replica->append_conf('postgresql.conf',
@ -81,26 +81,26 @@ $node_replica->append_conf('postgresql.conf',
$node_replica->start;
# If we drop 'dropme' on the master, the standby should drop the
# If we drop 'dropme' on the primary, the standby should drop the
# db and associated slot.
is($node_master->psql('postgres', 'DROP DATABASE dropme'),
0, 'dropped DB with logical slot OK on master');
$node_master->wait_for_catchup($node_replica, 'replay',
$node_master->lsn('insert'));
is($node_primary->psql('postgres', 'DROP DATABASE dropme'),
0, 'dropped DB with logical slot OK on primary');
$node_primary->wait_for_catchup($node_replica, 'replay',
$node_primary->lsn('insert'));
is( $node_replica->safe_psql(
'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
'',
'dropped DB dropme on standby');
is($node_master->slot('dropme_slot')->{'slot_name'},
is($node_primary->slot('dropme_slot')->{'slot_name'},
undef, 'logical slot was actually dropped on standby');
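# Complementary sketch, not part of the patch: replaying the DROP DATABASE
# should also have removed the copied slot from the replica itself.
is( $node_replica->safe_psql('postgres',
		"SELECT count(*) FROM pg_replication_slots WHERE slot_name = 'dropme_slot';"),
	'0',
	'dropme_slot no longer listed on the replica');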
# Back to testing failover...
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"SELECT pg_create_logical_replication_slot('after_basebackup', 'test_decoding');"
);
$node_master->safe_psql('postgres',
$node_primary->safe_psql('postgres',
"INSERT INTO decoding(blah) VALUES ('afterbb');");
$node_master->safe_psql('postgres', 'CHECKPOINT;');
$node_primary->safe_psql('postgres', 'CHECKPOINT;');
# Verify that only the before base_backup slot is on the replica
$stdout = $node_replica->safe_psql('postgres',
@ -109,20 +109,20 @@ is($stdout, 'before_basebackup',
'Expected to find only slot before_basebackup on replica');
# Examine the physical slot the replica uses to stream changes
# from the master to make sure its hot_standby_feedback
# from the primary to make sure its hot_standby_feedback
# has locked in a catalog_xmin on the physical slot, and that
# any xmin is < the catalog_xmin
$node_master->poll_query_until(
$node_primary->poll_query_until(
'postgres', q[
SELECT catalog_xmin IS NOT NULL
FROM pg_replication_slots
WHERE slot_name = 'phys_slot'
]) or die "slot's catalog_xmin never became set";
my $phys_slot = $node_master->slot('phys_slot');
isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of master');
my $phys_slot = $node_primary->slot('phys_slot');
isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of primary');
isnt($phys_slot->{'catalog_xmin'},
'', 'catalog_xmin assigned on physical slot of master');
'', 'catalog_xmin assigned on physical slot of primary');
# Ignore wrap-around here, we're on a new cluster:
cmp_ok(
@ -130,11 +130,11 @@ cmp_ok(
$phys_slot->{'catalog_xmin'},
'xmin on physical slot must not be lower than catalog_xmin');
$node_master->safe_psql('postgres', 'CHECKPOINT');
$node_master->wait_for_catchup($node_replica, 'write');
$node_primary->safe_psql('postgres', 'CHECKPOINT');
$node_primary->wait_for_catchup($node_replica, 'write');
# Boom, crash
$node_master->stop('immediate');
$node_primary->stop('immediate');
$node_replica->promote;

View File

@ -18,7 +18,7 @@ else
plan tests => 3;
}
my $node = get_new_node('master');
my $node = get_new_node('primary');
$node->init(allows_streaming => 1);
$node->start;
View File
@ -6,30 +6,30 @@ use PostgresNode;
use TestLib;
use Test::More tests => 12;
# Setup master node
my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1);
$node_master->append_conf(
# Setup primary node
my $node_primary = get_new_node("primary");
$node_primary->init(allows_streaming => 1);
$node_primary->append_conf(
'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
$node_master->start;
$node_master->backup('master_backup');
$node_master->psql('postgres', "CREATE TABLE t_012_tbl (id int)");
$node_primary->start;
$node_primary->backup('primary_backup');
$node_primary->psql('postgres', "CREATE TABLE t_012_tbl (id int)");
# Setup standby node
my $node_standby = get_new_node('standby');
$node_standby->init_from_backup($node_master, 'master_backup',
$node_standby->init_from_backup($node_primary, 'primary_backup',
has_streaming => 1);
$node_standby->start;
# Switch to synchronous replication
$node_master->append_conf(
$node_primary->append_conf(
'postgresql.conf', qq(
synchronous_standby_names = '*'
));
$node_master->psql('postgres', "SELECT pg_reload_conf()");
$node_primary->psql('postgres', "SELECT pg_reload_conf()");
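Setting synchronous_standby_names = '*' and reloading is what switches the standby into synchronous mode. A hedged sketch, not taken from this file, of how that could be verified via pg_stat_replication, assuming the usual convention that the standby's application_name is its node name:

$node_primary->poll_query_until('postgres',
	"SELECT sync_state = 'sync' FROM pg_stat_replication WHERE application_name = '"
	  . $node_standby->name . "';")
  or die "standby never became synchronous";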
my $psql_out = '';
my $psql_rc = '';
@ -39,7 +39,7 @@ my $psql_rc = '';
# so that it won't conflict with savepoint xids.
###############################################################################
$node_master->psql(
$node_primary->psql(
'postgres', "
BEGIN;
DELETE FROM t_012_tbl;
@ -57,9 +57,9 @@ $node_master->psql(
PREPARE TRANSACTION 'xact_012_1';
CHECKPOINT;");
$node_master->stop;
$node_master->start;
$node_master->psql(
$node_primary->stop;
$node_primary->start;
$node_primary->psql(
'postgres', "
-- here we can get xid of previous savepoint if nextXid
-- wasn't properly advanced
@ -68,7 +68,7 @@ $node_master->psql(
ROLLBACK;
COMMIT PREPARED 'xact_012_1';");
$node_master->psql(
$node_primary->psql(
'postgres',
"SELECT count(*) FROM t_012_tbl",
stdout => \$psql_out);
@ -79,10 +79,10 @@ is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
# PGPROC_MAX_CACHED_SUBXIDS subtransactions and also show data properly
# on promotion
###############################################################################
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
$node_primary->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
$node_master->psql(
$node_primary->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
@ -95,19 +95,19 @@ $node_master->psql(
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
$node_master->psql(
$node_primary->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(127);
COMMIT;");
$node_master->wait_for_catchup($node_standby, 'replay',
$node_master->lsn('insert'));
$node_primary->wait_for_catchup($node_standby, 'replay',
$node_primary->lsn('insert'));
$node_standby->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
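The expected 8128 comes from the helper: hs_subxids(127) inserts the ids 1 through 127, each level behind its own subtransaction, and 1 + 2 + ... + 127 = 127 * 128 / 2 = 8128. A one-line sanity check of that arithmetic, assuming that reading of the helper, whose body is only partially visible in this hunk:

my $expected = 0;
$expected += $_ for 1 .. 127;    # 8128, the value tested above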
$node_master->stop;
$node_primary->stop;
$node_standby->promote;
$node_standby->psql(
@ -117,8 +117,8 @@ $node_standby->psql(
is($psql_out, '8128', "Visible");
# restore state
($node_master, $node_standby) = ($node_standby, $node_master);
$node_standby->enable_streaming($node_master);
($node_primary, $node_standby) = ($node_standby, $node_primary);
$node_standby->enable_streaming($node_primary);
$node_standby->start;
$node_standby->psql(
'postgres',
@ -126,10 +126,10 @@ $node_standby->psql(
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
$node_primary->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
$node_master->psql(
$node_primary->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
@ -142,19 +142,19 @@ $node_master->psql(
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
$node_master->psql(
$node_primary->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(127);
PREPARE TRANSACTION 'xact_012_1';");
$node_master->wait_for_catchup($node_standby, 'replay',
$node_master->lsn('insert'));
$node_primary->wait_for_catchup($node_standby, 'replay',
$node_primary->lsn('insert'));
$node_standby->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_primary->stop;
$node_standby->promote;
$node_standby->psql(
@ -164,34 +164,34 @@ $node_standby->psql(
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_standby) = ($node_standby, $node_master);
$node_standby->enable_streaming($node_master);
($node_primary, $node_standby) = ($node_standby, $node_primary);
$node_standby->enable_streaming($node_primary);
$node_standby->start;
$psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
$psql_rc = $node_primary->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
is($psql_rc, '0',
"Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby"
);
$node_master->psql(
$node_primary->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
$node_master->psql(
$node_primary->psql('postgres', "DELETE FROM t_012_tbl");
$node_primary->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(201);
PREPARE TRANSACTION 'xact_012_1';");
$node_master->wait_for_catchup($node_standby, 'replay',
$node_master->lsn('insert'));
$node_primary->wait_for_catchup($node_standby, 'replay',
$node_primary->lsn('insert'));
$node_standby->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_primary->stop;
$node_standby->promote;
$node_standby->psql(
@ -201,15 +201,15 @@ $node_standby->psql(
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_standby) = ($node_standby, $node_master);
$node_standby->enable_streaming($node_master);
($node_primary, $node_standby) = ($node_standby, $node_primary);
$node_standby->enable_streaming($node_primary);
$node_standby->start;
$psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
$psql_rc = $node_primary->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
is($psql_rc, '0',
"Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby"
);
$node_master->psql(
$node_primary->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
View File
@ -25,7 +25,7 @@ plan tests => 18;
# is really wrong.
my $psql_timeout = IPC::Run::timer(60);
my $node = get_new_node('master');
my $node = get_new_node('primary');
$node->init(allows_streaming => 1);
$node->start();
View File
@ -13,21 +13,21 @@ use Time::HiRes qw(usleep);
$ENV{PGDATABASE} = 'postgres';
# Initialize master node, setting wal-segsize to 1MB
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1, extra => ['--wal-segsize=1']);
$node_master->append_conf(
# Initialize primary node, setting wal-segsize to 1MB
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1, extra => ['--wal-segsize=1']);
$node_primary->append_conf(
'postgresql.conf', qq(
min_wal_size = 2MB
max_wal_size = 4MB
log_checkpoints = yes
));
$node_master->start;
$node_master->safe_psql('postgres',
$node_primary->start;
$node_primary->safe_psql('postgres',
"SELECT pg_create_physical_replication_slot('rep1')");
# The slot state and remain should be null before the first connection
my $result = $node_master->safe_psql('postgres',
my $result = $node_primary->safe_psql('postgres',
"SELECT restart_lsn IS NULL, wal_status is NULL, safe_wal_size is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "t|t|t", 'check the state of non-reserved slot is "unknown"');
@ -35,133 +35,133 @@ is($result, "t|t|t", 'check the state of non-reserved slot is "unknown"');
# Take backup
my $backup_name = 'my_backup';
$node_master->backup($backup_name);
$node_primary->backup($backup_name);
# Create a standby linking to it using the replication slot
my $node_standby = get_new_node('standby_1');
$node_standby->init_from_backup($node_master, $backup_name,
$node_standby->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby->append_conf('postgresql.conf', "primary_slot_name = 'rep1'");
$node_standby->start;
# Wait until standby has replayed enough data
my $start_lsn = $node_master->lsn('write');
$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
my $start_lsn = $node_primary->lsn('write');
$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
# Stop standby
$node_standby->stop;
# Preparation done, the slot is the state "reserved" now
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size IS NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "reserved|t", 'check the catching-up state');
# Advance WAL by five segments (= 5MB) on master
advance_wal($node_master, 1);
$node_master->safe_psql('postgres', "CHECKPOINT;");
# Advance WAL by five segments (= 5MB) on primary
advance_wal($node_primary, 1);
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when fitting max_wal_size
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size IS NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "reserved|t",
'check that it is safe if WAL fits in max_wal_size');
advance_wal($node_master, 4);
$node_master->safe_psql('postgres', "CHECKPOINT;");
advance_wal($node_primary, 4);
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when max_slot_wal_keep_size is not set
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size IS NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "reserved|t", 'check that slot is working');
# The standby can reconnect to master
# The standby can reconnect to primary
$node_standby->start;
$start_lsn = $node_master->lsn('write');
$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
$start_lsn = $node_primary->lsn('write');
$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
# Set max_slot_wal_keep_size on master
# Set max_slot_wal_keep_size on primary
my $max_slot_wal_keep_size_mb = 6;
$node_master->append_conf(
$node_primary->append_conf(
'postgresql.conf', qq(
max_slot_wal_keep_size = ${max_slot_wal_keep_size_mb}MB
));
$node_master->reload;
$node_primary->reload;
# The slot is in safe state.
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "reserved", 'check that max_slot_wal_keep_size is working');
# Advance WAL again then checkpoint, reducing remain by 2 MB.
advance_wal($node_master, 2);
$node_master->safe_psql('postgres', "CHECKPOINT;");
advance_wal($node_primary, 2);
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is still working
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "reserved",
'check that safe_wal_size gets close to the current LSN');
# The standby can reconnect to master
# The standby can reconnect to primary
$node_standby->start;
$start_lsn = $node_master->lsn('write');
$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
$start_lsn = $node_primary->lsn('write');
$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
# wal_keep_segments overrides max_slot_wal_keep_size
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"ALTER SYSTEM SET wal_keep_segments to 8; SELECT pg_reload_conf();");
# Advance WAL again then checkpoint, reducing remain by 6 MB.
advance_wal($node_master, 6);
$result = $node_master->safe_psql('postgres',
advance_wal($node_primary, 6);
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status as remain FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "extended",
'check that wal_keep_segments overrides max_slot_wal_keep_size');
# restore wal_keep_segments
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"ALTER SYSTEM SET wal_keep_segments to 0; SELECT pg_reload_conf();");
# The standby can reconnect to master
# The standby can reconnect to primary
$node_standby->start;
$start_lsn = $node_master->lsn('write');
$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
$start_lsn = $node_primary->lsn('write');
$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
# Advance WAL again without checkpoint, reducing remain by 6 MB.
advance_wal($node_master, 6);
advance_wal($node_primary, 6);
# Slot gets into 'reserved' state
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "extended", 'check that the slot state changes to "extended"');
# do checkpoint so that the next checkpoint runs too early
$node_master->safe_psql('postgres', "CHECKPOINT;");
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# Advance WAL again without checkpoint; remain goes to 0.
advance_wal($node_master, 1);
advance_wal($node_primary, 1);
# Slot gets into 'unreserved' state and safe_wal_size is negative
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size <= 0 FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "unreserved|t",
'check that the slot state changes to "unreserved"');
# The standby still can connect to master before a checkpoint
# The standby still can connect to primary before a checkpoint
$node_standby->start;
$start_lsn = $node_master->lsn('write');
$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
$start_lsn = $node_primary->lsn('write');
$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
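By this point the slot has moved through the states the test exercises: reserved while the retained WAL still fits, extended while only wal_keep_segments or max_slot_wal_keep_size keeps it alive, and unreserved once removal is merely waiting for the next checkpoint. An assumed convenience wrapper (hypothetical name, not present in the file) for the status query repeated above:

sub slot_status_sketch
{
	my ($node, $slot) = @_;
	return $node->safe_psql('postgres',
		"SELECT wal_status, safe_wal_size FROM pg_replication_slots "
		  . "WHERE slot_name = '$slot';");
}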
@ -171,25 +171,25 @@ ok( !find_in_log(
'check that required WAL segments are still available');
# Advance WAL again, the slot loses the oldest segment.
my $logstart = get_log_size($node_master);
advance_wal($node_master, 7);
$node_master->safe_psql('postgres', "CHECKPOINT;");
my $logstart = get_log_size($node_primary);
advance_wal($node_primary, 7);
$node_primary->safe_psql('postgres', "CHECKPOINT;");
# WARNING should be issued
ok( find_in_log(
$node_master,
$node_primary,
"invalidating slot \"rep1\" because its restart_lsn [0-9A-F/]+ exceeds max_slot_wal_keep_size",
$logstart),
'check that the warning is logged');
# This slot should be broken
$result = $node_master->safe_psql('postgres',
$result = $node_primary->safe_psql('postgres',
"SELECT slot_name, active, restart_lsn IS NULL, wal_status, safe_wal_size FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "rep1|f|t|lost|",
'check that the slot became inactive and the state "lost" persists');
# The standby no longer can connect to the master
# The standby no longer can connect to the primary
$logstart = get_log_size($node_standby);
$node_standby->start;
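find_in_log() and get_log_size() are small helpers defined elsewhere in this file, outside the hunks shown. A sketch of what such helpers typically look like in these TAP tests, assuming TestLib::slurp_file and the node's logfile accessor; treat it as an approximation, not the file's actual code:

# Hypothetical stand-ins for the log helpers used above.
sub get_log_size_sketch
{
	my ($node) = @_;
	return -s $node->logfile;    # current size, later used as an offset
}

sub find_in_log_sketch
{
	my ($node, $pattern, $offset) = @_;
	$offset = 0 unless defined $offset;
	my $log = TestLib::slurp_file($node->logfile);
	return 0 if length($log) <= $offset;
	return substr($log, $offset) =~ m/$pattern/;
}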
@ -208,39 +208,39 @@ for (my $i = 0; $i < 10000; $i++)
}
ok($failed, 'check that replication has been broken');
$node_master->stop('immediate');
$node_primary->stop('immediate');
$node_standby->stop('immediate');
my $node_master2 = get_new_node('master2');
$node_master2->init(allows_streaming => 1);
$node_master2->append_conf(
my $node_primary2 = get_new_node('primary2');
$node_primary2->init(allows_streaming => 1);
$node_primary2->append_conf(
'postgresql.conf', qq(
min_wal_size = 32MB
max_wal_size = 32MB
log_checkpoints = yes
));
$node_master2->start;
$node_master2->safe_psql('postgres',
$node_primary2->start;
$node_primary2->safe_psql('postgres',
"SELECT pg_create_physical_replication_slot('rep1')");
$backup_name = 'my_backup2';
$node_master2->backup($backup_name);
$node_primary2->backup($backup_name);
$node_master2->stop;
$node_master2->append_conf(
$node_primary2->stop;
$node_primary2->append_conf(
'postgresql.conf', qq(
max_slot_wal_keep_size = 0
));
$node_master2->start;
$node_primary2->start;
$node_standby = get_new_node('standby_2');
$node_standby->init_from_backup($node_master2, $backup_name,
$node_standby->init_from_backup($node_primary2, $backup_name,
has_streaming => 1);
$node_standby->append_conf('postgresql.conf', "primary_slot_name = 'rep1'");
$node_standby->start;
my @result =
split(
'\n',
$node_master2->safe_psql(
$node_primary2->safe_psql(
'postgres',
"CREATE TABLE tt();
DROP TABLE tt;
@ -256,7 +256,7 @@ sub advance_wal
{
my ($node, $n) = @_;
# Advance by $n segments (= (16 * $n) MB) on master
# Advance by $n segments (= (16 * $n) MB) on primary
for (my $i = 0; $i < $n; $i++)
{
$node->safe_psql('postgres',
View File
@ -8,7 +8,7 @@ use TestLib;
use Test::More tests => 16;
use Config;
my $primary = get_new_node('master');
my $primary = get_new_node('primary');
$primary->init(
has_archiving => 1,
allows_streaming => 1);
View File
@ -61,7 +61,7 @@ push @keys, 'client_wrongperms';
#### Set up the server.
note "setting up data directory";
my $node = get_new_node('master');
my $node = get_new_node('primary');
$node->init;
# PGHOST is enforced here to set up the node, subsequent connections
View File
@ -35,7 +35,7 @@ my $common_connstr;
# Set up the server.
note "setting up data directory";
my $node = get_new_node('master');
my $node = get_new_node('primary');
$node->init;
# PGHOST is enforced here to set up the node, subsequent connections