Post-PG 10 beta1 pgperltidy run

Bruce Momjian 2017-05-17 19:01:23 -04:00
parent a6fd7b7a5f
commit ce55481032
66 changed files with 1869 additions and 1184 deletions
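For context: pgperltidy is the wrapper the PostgreSQL tree uses to run perltidy over its Perl scripts; it conventionally lives under src/tools/pgindent and reads the project's perltidyrc profile (paths recalled from the tree in general, not taken from this commit). The short Perl sketch below is not part of the commit; it is a hypothetical before/after illustration of the conventions the hunks below converge on: inner spaces in non-trivial hash subscripts, wrapping an overlong statement before its '=', aligning parallel assignments, and laying out multi-line hash constructors one key per line.

# Illustrative sketch only -- hypothetical code, not taken from this commit.
use strict;
use warnings;

my %regprocoids = (boolin => 1242);
my %bki_values  = (att    => 'boolin');    # parallel '=' / '=>' columns are aligned

# Non-trivial hash subscripts gain inner spaces: $h{$k{x}} becomes $h{ $k{x} }.
my $procoid = $regprocoids{ $bki_values{att} };

# A statement that overflows the line limit is wrapped before the '='.
my @plan =
  map { "$_->[0]\n" } @{ [ ['Seq Scan on t'] ] };

# Multi-line hash constructors move to the next line, one key per row.
my @mapping;
push @mapping,
  { ucs       => 0x20AC,
    code      => 0xa2e6,
    direction => 'BOTH' };

print @plan;    # prints "Seq Scan on t"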

View File

@ -92,7 +92,8 @@ if ($opt{v})
if ($opt{e})
{
my @plan = map { "$_->[0]\n" } @{$dbi->selectall_arrayref("explain $sql")};
my @plan =
map { "$_->[0]\n" } @{ $dbi->selectall_arrayref("explain $sql") };
print @plan;
}

View File

@ -80,10 +80,11 @@ sub Catalogs
{
$catalog{natts} = $1;
}
elsif (/^DATA\(insert(\s+OID\s+=\s+(\d+))?\s+\(\s*(.*)\s*\)\s*\)$/)
elsif (
/^DATA\(insert(\s+OID\s+=\s+(\d+))?\s+\(\s*(.*)\s*\)\s*\)$/)
{
check_natts($filename, $catalog{natts}, $3,
$input_file, $input_line_number);
check_natts($filename, $catalog{natts}, $3, $input_file,
$input_line_number);
push @{ $catalog{data} }, { oid => $2, bki_values => $3 };
}
@ -256,14 +257,15 @@ sub check_natts
{
my ($catname, $natts, $bki_val, $file, $line) = @_;
die "Could not find definition for Natts_${catname} before start of DATA() in $file\n"
unless defined $natts;
die
"Could not find definition for Natts_${catname} before start of DATA() in $file\n"
unless defined $natts;
my $nfields = scalar(SplitDataLine($bki_val));
die sprintf
"Wrong number of attributes in DATA() entry at %s:%d (expected %d but got %d)\n",
$file, $line, $natts, $nfields
"Wrong number of attributes in DATA() entry at %s:%d (expected %d but got %d)\n",
$file, $line, $natts, $nfields
unless $natts == $nfields;
}

View File

@ -163,11 +163,13 @@ foreach my $catname (@{ $catalogs->{names} })
# Split line into tokens without interpreting their meaning.
my %bki_values;
@bki_values{@attnames} = Catalog::SplitDataLine($row->{bki_values});
@bki_values{@attnames} =
Catalog::SplitDataLine($row->{bki_values});
# Perform required substitutions on fields
foreach my $att (keys %bki_values)
{
# Substitute constant values we acquired above.
# (It's intentional that this can apply to parts of a field).
$bki_values{$att} =~ s/\bPGUID\b/$BOOTSTRAP_SUPERUSERID/g;
@ -178,9 +180,9 @@ foreach my $catname (@{ $catalogs->{names} })
# just do nothing (regprocin will complain).
if ($bki_attr{$att}->{type} eq 'regproc')
{
my $procoid = $regprocoids{$bki_values{$att}};
my $procoid = $regprocoids{ $bki_values{$att} };
$bki_values{$att} = $procoid
if defined($procoid) && $procoid ne 'MULTIPLE';
if defined($procoid) && $procoid ne 'MULTIPLE';
}
}
@ -188,13 +190,13 @@ foreach my $catname (@{ $catalogs->{names} })
# This relies on the order we process the files in!
if ($catname eq 'pg_proc')
{
if (defined($regprocoids{$bki_values{proname}}))
if (defined($regprocoids{ $bki_values{proname} }))
{
$regprocoids{$bki_values{proname}} = 'MULTIPLE';
$regprocoids{ $bki_values{proname} } = 'MULTIPLE';
}
else
{
$regprocoids{$bki_values{proname}} = $row->{oid};
$regprocoids{ $bki_values{proname} } = $row->{oid};
}
}
@ -211,7 +213,7 @@ foreach my $catname (@{ $catalogs->{names} })
printf $bki "insert %s( %s )\n", $oid,
join(' ', @bki_values{@attnames});
# Write comments to postgres.description and postgres.shdescription
# Write comments to postgres.description and postgres.shdescription
if (defined $row->{descr})
{
printf $descr "%s\t%s\t0\t%s\n", $row->{oid}, $catname,
@ -459,7 +461,8 @@ sub bki_insert
my $row = shift;
my @attnames = @_;
my $oid = $row->{oid} ? "OID = $row->{oid} " : '';
my $bki_values = join ' ', map { $_ eq '' ? '""' : $_ } map $row->{$_}, @attnames;
my $bki_values = join ' ', map { $_ eq '' ? '""' : $_ } map $row->{$_},
@attnames;
printf $bki "insert %s( %s )\n", $oid, $bki_values;
}
@ -474,9 +477,9 @@ sub emit_schemapg_row
$row->{attidentity} ||= '\0';
# Supply appropriate quoting for these fields.
$row->{attname} = q|{"| . $row->{attname} . q|"}|;
$row->{attstorage} = q|'| . $row->{attstorage} . q|'|;
$row->{attalign} = q|'| . $row->{attalign} . q|'|;
$row->{attname} = q|{"| . $row->{attname} . q|"}|;
$row->{attstorage} = q|'| . $row->{attstorage} . q|'|;
$row->{attalign} = q|'| . $row->{attalign} . q|'|;
$row->{attidentity} = q|'| . $row->{attidentity} . q|'|;
# We don't emit initializers for the variable length fields at all.

View File

@ -149,7 +149,8 @@ while (my ($kcat, $kcat_id) = each(%keyword_categories))
# Now read in kwlist.h
open(my $kwlist, '<', $kwlist_filename) || die("Could not open : $kwlist_filename");
open(my $kwlist, '<', $kwlist_filename)
|| die("Could not open : $kwlist_filename");
my $prevkwstring = '';
my $bare_kwname;

View File

@ -58,6 +58,7 @@ foreach my $column (@{ $catalogs->{pg_proc}->{columns} })
my $data = $catalogs->{pg_proc}->{data};
foreach my $row (@$data)
{
# Split line into tokens without interpreting their meaning.
my %bki_values;
@bki_values{@attnames} = Catalog::SplitDataLine($row->{bki_values});
@ -75,14 +76,17 @@ foreach my $row (@$data)
}
# Emit headers for both files
my $tmpext = ".tmp$$";
my $oidsfile = $output_path . 'fmgroids.h';
my $tmpext = ".tmp$$";
my $oidsfile = $output_path . 'fmgroids.h';
my $protosfile = $output_path . 'fmgrprotos.h';
my $tabfile = $output_path . 'fmgrtab.c';
my $tabfile = $output_path . 'fmgrtab.c';
open my $ofh, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!";
open my $pfh, '>', $protosfile . $tmpext or die "Could not open $protosfile$tmpext: $!";
open my $tfh, '>', $tabfile . $tmpext or die "Could not open $tabfile$tmpext: $!";
open my $ofh, '>', $oidsfile . $tmpext
or die "Could not open $oidsfile$tmpext: $!";
open my $pfh, '>', $protosfile . $tmpext
or die "Could not open $protosfile$tmpext: $!";
open my $tfh, '>', $tabfile . $tmpext
or die "Could not open $tabfile$tmpext: $!";
print $ofh
qq|/*-------------------------------------------------------------------------
@ -218,9 +222,9 @@ close($pfh);
close($tfh);
# Finally, rename the completed files into place.
Catalog::RenameTempFile($oidsfile, $tmpext);
Catalog::RenameTempFile($oidsfile, $tmpext);
Catalog::RenameTempFile($protosfile, $tmpext);
Catalog::RenameTempFile($tabfile, $tmpext);
Catalog::RenameTempFile($tabfile, $tmpext);
sub usage
{

View File

@ -35,9 +35,10 @@ my $all = &read_source("BIG5.TXT");
# Load CP950.TXT
my $cp950txt = &read_source("CP950.TXT");
foreach my $i (@$cp950txt) {
foreach my $i (@$cp950txt)
{
my $code = $i->{code};
my $ucs = $i->{ucs};
my $ucs = $i->{ucs};
# Pick only the ETEN extended characters in the range 0xf9d6 - 0xf9dc
# from CP950.TXT
@ -46,22 +47,24 @@ foreach my $i (@$cp950txt) {
&& $code >= 0xf9d6
&& $code <= 0xf9dc)
{
push @$all, {code => $code,
ucs => $ucs,
comment => $i->{comment},
direction => BOTH,
f => $i->{f},
l => $i->{l} };
push @$all,
{ code => $code,
ucs => $ucs,
comment => $i->{comment},
direction => BOTH,
f => $i->{f},
l => $i->{l} };
}
}
foreach my $i (@$all) {
foreach my $i (@$all)
{
my $code = $i->{code};
my $ucs = $i->{ucs};
my $ucs = $i->{ucs};
# BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can
# contain only one of them. XXX: Doesn't really make sense to include any of them,
# but for historical reasons, we map the first one of them.
# BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can
# contain only one of them. XXX: Doesn't really make sense to include any of them,
# but for historical reasons, we map the first one of them.
if ($i->{ucs} == 0xFFFD && $i->{code} != 0xA15A)
{
$i->{direction} = TO_UNICODE;

View File

@ -38,8 +38,10 @@ while (<$in>)
# a lot of extra characters on top of the GB2312 character set that
# EUC_CN encodes. Filter out those extra characters.
next if (($code & 0xFF) < 0xA1);
next if (!($code >= 0xA100 && $code <= 0xA9FF ||
$code >= 0xB000 && $code <= 0xF7FF));
next
if (
!( $code >= 0xA100 && $code <= 0xA9FF
|| $code >= 0xB000 && $code <= 0xF7FF));
next if ($code >= 0xA2A1 && $code <= 0xA2B0);
next if ($code >= 0xA2E3 && $code <= 0xA2E4);
@ -67,13 +69,12 @@ while (<$in>)
$ucs = 0x2015;
}
push @mapping, {
ucs => $ucs,
code => $code,
push @mapping,
{ ucs => $ucs,
code => $code,
direction => BOTH,
f => $in_file,
l => $.
};
f => $in_file,
l => $. };
}
close($in);

View File

@ -24,6 +24,7 @@ while (my $line = <$in>)
{
if ($line =~ /^0x(.*)[ \t]*U\+(.*)\+(.*)[ \t]*#(.*)$/)
{
# combined characters
my ($c, $u1, $u2) = ($1, $2, $3);
my $rest = "U+" . $u1 . "+" . $u2 . $4;
@ -31,17 +32,18 @@ while (my $line = <$in>)
my $ucs1 = hex($u1);
my $ucs2 = hex($u2);
push @all, { direction => BOTH,
ucs => $ucs1,
ucs_second => $ucs2,
code => $code,
comment => $rest,
f => $in_file,
l => $.
};
push @all,
{ direction => BOTH,
ucs => $ucs1,
ucs_second => $ucs2,
code => $code,
comment => $rest,
f => $in_file,
l => $. };
}
elsif ($line =~ /^0x(.*)[ \t]*U\+(.*)[ \t]*#(.*)$/)
{
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
my $ucs = hex($u);
@ -49,13 +51,13 @@ while (my $line = <$in>)
next if ($code < 0x80 && $ucs < 0x80);
push @all, { direction => BOTH,
ucs => $ucs,
code => $code,
comment => $rest,
f => $in_file,
l => $.
};
push @all,
{ direction => BOTH,
ucs => $ucs,
code => $code,
comment => $rest,
f => $in_file,
l => $. };
}
}
close($in);

View File

@ -21,7 +21,9 @@ my $jis0212 = &read_source("JIS0212.TXT");
my @mapping;
foreach my $i (@$jis0212) {
foreach my $i (@$jis0212)
{
# We have a different mapping for this in the EUC_JP to UTF-8 direction.
if ($i->{code} == 0x2243)
{
@ -48,13 +50,14 @@ foreach my $i (@$jis0212) {
# Load CP932.TXT.
my $ct932 = &read_source("CP932.TXT");
foreach my $i (@$ct932) {
foreach my $i (@$ct932)
{
my $sjis = $i->{code};
# We have a different mapping for this in the EUC_JP to UTF-8 direction.
if ($sjis == 0xeefa ||
$sjis == 0xeefb ||
$sjis == 0xeefc)
if ( $sjis == 0xeefa
|| $sjis == 0xeefb
|| $sjis == 0xeefc)
{
next;
}
@ -63,8 +66,10 @@ foreach my $i (@$ct932) {
{
my $jis = &sjis2jis($sjis);
$i->{code} = $jis | ($jis < 0x100 ? 0x8e00 :
($sjis >= 0xeffd ? 0x8f8080 : 0x8080));
$i->{code} = $jis | (
$jis < 0x100
? 0x8e00
: ($sjis >= 0xeffd ? 0x8f8080 : 0x8080));
# Remember the SJIS code for later.
$i->{sjis} = $sjis;
@ -73,13 +78,14 @@ foreach my $i (@$ct932) {
}
}
foreach my $i (@mapping) {
foreach my $i (@mapping)
{
my $sjis = $i->{sjis};
# These SJIS characters are excluded completely.
if ($sjis >= 0xed00 && $sjis <= 0xeef9 ||
$sjis >= 0xfa54 && $sjis <= 0xfa56 ||
$sjis >= 0xfa58 && $sjis <= 0xfc4b)
if ( $sjis >= 0xed00 && $sjis <= 0xeef9
|| $sjis >= 0xfa54 && $sjis <= 0xfa56
|| $sjis >= 0xfa58 && $sjis <= 0xfc4b)
{
$i->{direction} = NONE;
next;
@ -92,10 +98,16 @@ foreach my $i (@mapping) {
next;
}
if ($sjis == 0x8790 || $sjis == 0x8791 || $sjis == 0x8792 ||
$sjis == 0x8795 || $sjis == 0x8796 || $sjis == 0x8797 ||
$sjis == 0x879a || $sjis == 0x879b || $sjis == 0x879c ||
($sjis >= 0xfa4a && $sjis <= 0xfa53))
if ( $sjis == 0x8790
|| $sjis == 0x8791
|| $sjis == 0x8792
|| $sjis == 0x8795
|| $sjis == 0x8796
|| $sjis == 0x8797
|| $sjis == 0x879a
|| $sjis == 0x879b
|| $sjis == 0x879c
|| ($sjis >= 0xfa4a && $sjis <= 0xfa53))
{
$i->{direction} = TO_UNICODE;
next;
@ -103,95 +115,352 @@ foreach my $i (@mapping) {
}
push @mapping, (
{direction => BOTH, ucs => 0x4efc, code => 0x8ff4af, comment => '# CJK(4EFC)'},
{direction => BOTH, ucs => 0x50f4, code => 0x8ff4b0, comment => '# CJK(50F4)'},
{direction => BOTH, ucs => 0x51EC, code => 0x8ff4b1, comment => '# CJK(51EC)'},
{direction => BOTH, ucs => 0x5307, code => 0x8ff4b2, comment => '# CJK(5307)'},
{direction => BOTH, ucs => 0x5324, code => 0x8ff4b3, comment => '# CJK(5324)'},
{direction => BOTH, ucs => 0x548A, code => 0x8ff4b5, comment => '# CJK(548A)'},
{direction => BOTH, ucs => 0x5759, code => 0x8ff4b6, comment => '# CJK(5759)'},
{direction => BOTH, ucs => 0x589E, code => 0x8ff4b9, comment => '# CJK(589E)'},
{direction => BOTH, ucs => 0x5BEC, code => 0x8ff4ba, comment => '# CJK(5BEC)'},
{direction => BOTH, ucs => 0x5CF5, code => 0x8ff4bb, comment => '# CJK(5CF5)'},
{direction => BOTH, ucs => 0x5D53, code => 0x8ff4bc, comment => '# CJK(5D53)'},
{direction => BOTH, ucs => 0x5FB7, code => 0x8ff4be, comment => '# CJK(5FB7)'},
{direction => BOTH, ucs => 0x6085, code => 0x8ff4bf, comment => '# CJK(6085)'},
{direction => BOTH, ucs => 0x6120, code => 0x8ff4c0, comment => '# CJK(6120)'},
{direction => BOTH, ucs => 0x654E, code => 0x8ff4c1, comment => '# CJK(654E)'},
{direction => BOTH, ucs => 0x663B, code => 0x8ff4c2, comment => '# CJK(663B)'},
{direction => BOTH, ucs => 0x6665, code => 0x8ff4c3, comment => '# CJK(6665)'},
{direction => BOTH, ucs => 0x6801, code => 0x8ff4c6, comment => '# CJK(6801)'},
{direction => BOTH, ucs => 0x6A6B, code => 0x8ff4c9, comment => '# CJK(6A6B)'},
{direction => BOTH, ucs => 0x6AE2, code => 0x8ff4ca, comment => '# CJK(6AE2)'},
{direction => BOTH, ucs => 0x6DF2, code => 0x8ff4cc, comment => '# CJK(6DF2)'},
{direction => BOTH, ucs => 0x6DF8, code => 0x8ff4cb, comment => '# CJK(6DF8)'},
{direction => BOTH, ucs => 0x7028, code => 0x8ff4cd, comment => '# CJK(7028)'},
{direction => BOTH, ucs => 0x70BB, code => 0x8ff4ae, comment => '# CJK(70BB)'},
{direction => BOTH, ucs => 0x7501, code => 0x8ff4d0, comment => '# CJK(7501)'},
{direction => BOTH, ucs => 0x7682, code => 0x8ff4d1, comment => '# CJK(7682)'},
{direction => BOTH, ucs => 0x769E, code => 0x8ff4d2, comment => '# CJK(769E)'},
{direction => BOTH, ucs => 0x7930, code => 0x8ff4d4, comment => '# CJK(7930)'},
{direction => BOTH, ucs => 0x7AE7, code => 0x8ff4d9, comment => '# CJK(7AE7)'},
{direction => BOTH, ucs => 0x7DA0, code => 0x8ff4dc, comment => '# CJK(7DA0)'},
{direction => BOTH, ucs => 0x7DD6, code => 0x8ff4dd, comment => '# CJK(7DD6)'},
{direction => BOTH, ucs => 0x8362, code => 0x8ff4df, comment => '# CJK(8362)'},
{direction => BOTH, ucs => 0x85B0, code => 0x8ff4e1, comment => '# CJK(85B0)'},
{direction => BOTH, ucs => 0x8807, code => 0x8ff4e4, comment => '# CJK(8807)'},
{direction => BOTH, ucs => 0x8B7F, code => 0x8ff4e6, comment => '# CJK(8B7F)'},
{direction => BOTH, ucs => 0x8CF4, code => 0x8ff4e7, comment => '# CJK(8CF4)'},
{direction => BOTH, ucs => 0x8D76, code => 0x8ff4e8, comment => '# CJK(8D76)'},
{direction => BOTH, ucs => 0x90DE, code => 0x8ff4ec, comment => '# CJK(90DE)'},
{direction => BOTH, ucs => 0x9115, code => 0x8ff4ee, comment => '# CJK(9115)'},
{direction => BOTH, ucs => 0x9592, code => 0x8ff4f1, comment => '# CJK(9592)'},
{direction => BOTH, ucs => 0x973B, code => 0x8ff4f4, comment => '# CJK(973B)'},
{direction => BOTH, ucs => 0x974D, code => 0x8ff4f5, comment => '# CJK(974D)'},
{direction => BOTH, ucs => 0x9751, code => 0x8ff4f6, comment => '# CJK(9751)'},
{direction => BOTH, ucs => 0x999E, code => 0x8ff4fa, comment => '# CJK(999E)'},
{direction => BOTH, ucs => 0x9AD9, code => 0x8ff4fb, comment => '# CJK(9AD9)'},
{direction => BOTH, ucs => 0x9B72, code => 0x8ff4fc, comment => '# CJK(9B72)'},
{direction => BOTH, ucs => 0x9ED1, code => 0x8ff4fe, comment => '# CJK(9ED1)'},
{direction => BOTH, ucs => 0xF929, code => 0x8ff4c5, comment => '# CJK COMPATIBILITY IDEOGRAPH-F929'},
{direction => BOTH, ucs => 0xF9DC, code => 0x8ff4f2, comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'},
{direction => BOTH, ucs => 0xFA0E, code => 0x8ff4b4, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'},
{direction => BOTH, ucs => 0xFA0F, code => 0x8ff4b7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'},
{direction => BOTH, ucs => 0xFA10, code => 0x8ff4b8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10'},
{direction => BOTH, ucs => 0xFA11, code => 0x8ff4bd, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11'},
{direction => BOTH, ucs => 0xFA12, code => 0x8ff4c4, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12'},
{direction => BOTH, ucs => 0xFA13, code => 0x8ff4c7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13'},
{direction => BOTH, ucs => 0xFA14, code => 0x8ff4c8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14'},
{direction => BOTH, ucs => 0xFA15, code => 0x8ff4ce, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15'},
{direction => BOTH, ucs => 0xFA16, code => 0x8ff4cf, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16'},
{direction => BOTH, ucs => 0xFA17, code => 0x8ff4d3, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17'},
{direction => BOTH, ucs => 0xFA18, code => 0x8ff4d5, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18'},
{direction => BOTH, ucs => 0xFA19, code => 0x8ff4d6, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19'},
{direction => BOTH, ucs => 0xFA1A, code => 0x8ff4d7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'},
{direction => BOTH, ucs => 0xFA1B, code => 0x8ff4d8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'},
{direction => BOTH, ucs => 0xFA1C, code => 0x8ff4da, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'},
{direction => BOTH, ucs => 0xFA1D, code => 0x8ff4db, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'},
{direction => BOTH, ucs => 0xFA1E, code => 0x8ff4de, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'},
{direction => BOTH, ucs => 0xFA1F, code => 0x8ff4e0, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'},
{direction => BOTH, ucs => 0xFA20, code => 0x8ff4e2, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20'},
{direction => BOTH, ucs => 0xFA21, code => 0x8ff4e3, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21'},
{direction => BOTH, ucs => 0xFA22, code => 0x8ff4e5, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22'},
{direction => BOTH, ucs => 0xFA23, code => 0x8ff4e9, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23'},
{direction => BOTH, ucs => 0xFA24, code => 0x8ff4ea, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24'},
{direction => BOTH, ucs => 0xFA25, code => 0x8ff4eb, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25'},
{direction => BOTH, ucs => 0xFA26, code => 0x8ff4ed, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26'},
{direction => BOTH, ucs => 0xFA27, code => 0x8ff4ef, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27'},
{direction => BOTH, ucs => 0xFA28, code => 0x8ff4f0, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28'},
{direction => BOTH, ucs => 0xFA29, code => 0x8ff4f3, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29'},
{direction => BOTH, ucs => 0xFA2A, code => 0x8ff4f7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'},
{direction => BOTH, ucs => 0xFA2B, code => 0x8ff4f8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'},
{direction => BOTH, ucs => 0xFA2C, code => 0x8ff4f9, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'},
{direction => BOTH, ucs => 0xFA2D, code => 0x8ff4fd, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'},
{direction => BOTH, ucs => 0xFF07, code => 0x8ff4a9, comment => '# FULLWIDTH APOSTROPHE'},
{direction => BOTH, ucs => 0xFFE4, code => 0x8fa2c3, comment => '# FULLWIDTH BROKEN BAR'},
{ direction => BOTH,
ucs => 0x4efc,
code => 0x8ff4af,
comment => '# CJK(4EFC)' },
{ direction => BOTH,
ucs => 0x50f4,
code => 0x8ff4b0,
comment => '# CJK(50F4)' },
{ direction => BOTH,
ucs => 0x51EC,
code => 0x8ff4b1,
comment => '# CJK(51EC)' },
{ direction => BOTH,
ucs => 0x5307,
code => 0x8ff4b2,
comment => '# CJK(5307)' },
{ direction => BOTH,
ucs => 0x5324,
code => 0x8ff4b3,
comment => '# CJK(5324)' },
{ direction => BOTH,
ucs => 0x548A,
code => 0x8ff4b5,
comment => '# CJK(548A)' },
{ direction => BOTH,
ucs => 0x5759,
code => 0x8ff4b6,
comment => '# CJK(5759)' },
{ direction => BOTH,
ucs => 0x589E,
code => 0x8ff4b9,
comment => '# CJK(589E)' },
{ direction => BOTH,
ucs => 0x5BEC,
code => 0x8ff4ba,
comment => '# CJK(5BEC)' },
{ direction => BOTH,
ucs => 0x5CF5,
code => 0x8ff4bb,
comment => '# CJK(5CF5)' },
{ direction => BOTH,
ucs => 0x5D53,
code => 0x8ff4bc,
comment => '# CJK(5D53)' },
{ direction => BOTH,
ucs => 0x5FB7,
code => 0x8ff4be,
comment => '# CJK(5FB7)' },
{ direction => BOTH,
ucs => 0x6085,
code => 0x8ff4bf,
comment => '# CJK(6085)' },
{ direction => BOTH,
ucs => 0x6120,
code => 0x8ff4c0,
comment => '# CJK(6120)' },
{ direction => BOTH,
ucs => 0x654E,
code => 0x8ff4c1,
comment => '# CJK(654E)' },
{ direction => BOTH,
ucs => 0x663B,
code => 0x8ff4c2,
comment => '# CJK(663B)' },
{ direction => BOTH,
ucs => 0x6665,
code => 0x8ff4c3,
comment => '# CJK(6665)' },
{ direction => BOTH,
ucs => 0x6801,
code => 0x8ff4c6,
comment => '# CJK(6801)' },
{ direction => BOTH,
ucs => 0x6A6B,
code => 0x8ff4c9,
comment => '# CJK(6A6B)' },
{ direction => BOTH,
ucs => 0x6AE2,
code => 0x8ff4ca,
comment => '# CJK(6AE2)' },
{ direction => BOTH,
ucs => 0x6DF2,
code => 0x8ff4cc,
comment => '# CJK(6DF2)' },
{ direction => BOTH,
ucs => 0x6DF8,
code => 0x8ff4cb,
comment => '# CJK(6DF8)' },
{ direction => BOTH,
ucs => 0x7028,
code => 0x8ff4cd,
comment => '# CJK(7028)' },
{ direction => BOTH,
ucs => 0x70BB,
code => 0x8ff4ae,
comment => '# CJK(70BB)' },
{ direction => BOTH,
ucs => 0x7501,
code => 0x8ff4d0,
comment => '# CJK(7501)' },
{ direction => BOTH,
ucs => 0x7682,
code => 0x8ff4d1,
comment => '# CJK(7682)' },
{ direction => BOTH,
ucs => 0x769E,
code => 0x8ff4d2,
comment => '# CJK(769E)' },
{ direction => BOTH,
ucs => 0x7930,
code => 0x8ff4d4,
comment => '# CJK(7930)' },
{ direction => BOTH,
ucs => 0x7AE7,
code => 0x8ff4d9,
comment => '# CJK(7AE7)' },
{ direction => BOTH,
ucs => 0x7DA0,
code => 0x8ff4dc,
comment => '# CJK(7DA0)' },
{ direction => BOTH,
ucs => 0x7DD6,
code => 0x8ff4dd,
comment => '# CJK(7DD6)' },
{ direction => BOTH,
ucs => 0x8362,
code => 0x8ff4df,
comment => '# CJK(8362)' },
{ direction => BOTH,
ucs => 0x85B0,
code => 0x8ff4e1,
comment => '# CJK(85B0)' },
{ direction => BOTH,
ucs => 0x8807,
code => 0x8ff4e4,
comment => '# CJK(8807)' },
{ direction => BOTH,
ucs => 0x8B7F,
code => 0x8ff4e6,
comment => '# CJK(8B7F)' },
{ direction => BOTH,
ucs => 0x8CF4,
code => 0x8ff4e7,
comment => '# CJK(8CF4)' },
{ direction => BOTH,
ucs => 0x8D76,
code => 0x8ff4e8,
comment => '# CJK(8D76)' },
{ direction => BOTH,
ucs => 0x90DE,
code => 0x8ff4ec,
comment => '# CJK(90DE)' },
{ direction => BOTH,
ucs => 0x9115,
code => 0x8ff4ee,
comment => '# CJK(9115)' },
{ direction => BOTH,
ucs => 0x9592,
code => 0x8ff4f1,
comment => '# CJK(9592)' },
{ direction => BOTH,
ucs => 0x973B,
code => 0x8ff4f4,
comment => '# CJK(973B)' },
{ direction => BOTH,
ucs => 0x974D,
code => 0x8ff4f5,
comment => '# CJK(974D)' },
{ direction => BOTH,
ucs => 0x9751,
code => 0x8ff4f6,
comment => '# CJK(9751)' },
{ direction => BOTH,
ucs => 0x999E,
code => 0x8ff4fa,
comment => '# CJK(999E)' },
{ direction => BOTH,
ucs => 0x9AD9,
code => 0x8ff4fb,
comment => '# CJK(9AD9)' },
{ direction => BOTH,
ucs => 0x9B72,
code => 0x8ff4fc,
comment => '# CJK(9B72)' },
{ direction => BOTH,
ucs => 0x9ED1,
code => 0x8ff4fe,
comment => '# CJK(9ED1)' },
{ direction => BOTH,
ucs => 0xF929,
code => 0x8ff4c5,
comment => '# CJK COMPATIBILITY IDEOGRAPH-F929' },
{ direction => BOTH,
ucs => 0xF9DC,
code => 0x8ff4f2,
comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC' },
{ direction => BOTH,
ucs => 0xFA0E,
code => 0x8ff4b4,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E' },
{ direction => BOTH,
ucs => 0xFA0F,
code => 0x8ff4b7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F' },
{ direction => BOTH,
ucs => 0xFA10,
code => 0x8ff4b8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10' },
{ direction => BOTH,
ucs => 0xFA11,
code => 0x8ff4bd,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11' },
{ direction => BOTH,
ucs => 0xFA12,
code => 0x8ff4c4,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12' },
{ direction => BOTH,
ucs => 0xFA13,
code => 0x8ff4c7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13' },
{ direction => BOTH,
ucs => 0xFA14,
code => 0x8ff4c8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14' },
{ direction => BOTH,
ucs => 0xFA15,
code => 0x8ff4ce,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15' },
{ direction => BOTH,
ucs => 0xFA16,
code => 0x8ff4cf,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16' },
{ direction => BOTH,
ucs => 0xFA17,
code => 0x8ff4d3,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17' },
{ direction => BOTH,
ucs => 0xFA18,
code => 0x8ff4d5,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18' },
{ direction => BOTH,
ucs => 0xFA19,
code => 0x8ff4d6,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19' },
{ direction => BOTH,
ucs => 0xFA1A,
code => 0x8ff4d7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A' },
{ direction => BOTH,
ucs => 0xFA1B,
code => 0x8ff4d8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B' },
{ direction => BOTH,
ucs => 0xFA1C,
code => 0x8ff4da,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C' },
{ direction => BOTH,
ucs => 0xFA1D,
code => 0x8ff4db,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D' },
{ direction => BOTH,
ucs => 0xFA1E,
code => 0x8ff4de,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E' },
{ direction => BOTH,
ucs => 0xFA1F,
code => 0x8ff4e0,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F' },
{ direction => BOTH,
ucs => 0xFA20,
code => 0x8ff4e2,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20' },
{ direction => BOTH,
ucs => 0xFA21,
code => 0x8ff4e3,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21' },
{ direction => BOTH,
ucs => 0xFA22,
code => 0x8ff4e5,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22' },
{ direction => BOTH,
ucs => 0xFA23,
code => 0x8ff4e9,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23' },
{ direction => BOTH,
ucs => 0xFA24,
code => 0x8ff4ea,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24' },
{ direction => BOTH,
ucs => 0xFA25,
code => 0x8ff4eb,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25' },
{ direction => BOTH,
ucs => 0xFA26,
code => 0x8ff4ed,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26' },
{ direction => BOTH,
ucs => 0xFA27,
code => 0x8ff4ef,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27' },
{ direction => BOTH,
ucs => 0xFA28,
code => 0x8ff4f0,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28' },
{ direction => BOTH,
ucs => 0xFA29,
code => 0x8ff4f3,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29' },
{ direction => BOTH,
ucs => 0xFA2A,
code => 0x8ff4f7,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A' },
{ direction => BOTH,
ucs => 0xFA2B,
code => 0x8ff4f8,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B' },
{ direction => BOTH,
ucs => 0xFA2C,
code => 0x8ff4f9,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C' },
{ direction => BOTH,
ucs => 0xFA2D,
code => 0x8ff4fd,
comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D' },
{ direction => BOTH,
ucs => 0xFF07,
code => 0x8ff4a9,
comment => '# FULLWIDTH APOSTROPHE' },
{ direction => BOTH,
ucs => 0xFFE4,
code => 0x8fa2c3,
comment => '# FULLWIDTH BROKEN BAR' },
# additional conversions for EUC_JP -> UTF-8 conversion
{direction => TO_UNICODE, ucs => 0x2116, code => 0x8ff4ac, comment => '# NUMERO SIGN'},
{direction => TO_UNICODE, ucs => 0x2121, code => 0x8ff4ad, comment => '# TELEPHONE SIGN'},
{direction => TO_UNICODE, ucs => 0x3231, code => 0x8ff4ab, comment => '# PARENTHESIZED IDEOGRAPH STOCK'}
);
# additional conversions for EUC_JP -> UTF-8 conversion
{ direction => TO_UNICODE,
ucs => 0x2116,
code => 0x8ff4ac,
comment => '# NUMERO SIGN' },
{ direction => TO_UNICODE,
ucs => 0x2121,
code => 0x8ff4ad,
comment => '# TELEPHONE SIGN' },
{ direction => TO_UNICODE,
ucs => 0x3231,
code => 0x8ff4ab,
comment => '# PARENTHESIZED IDEOGRAPH STOCK' });
print_conversion_tables($this_script, "EUC_JP", \@mapping);
@ -215,6 +484,7 @@ sub sjis2jis
if ($pos >= 114 * 0x5e && $pos <= 115 * 0x5e + 0x1b)
{
# This region (115-ku) is out of range of JIS code but for
# convenient to generate code in EUC CODESET 3, move this to
# seemingly duplicate region (83-84-ku).

View File

@ -31,10 +31,24 @@ foreach my $i (@$mapping)
}
# Some extra characters that are not in KSX1001.TXT
push @$mapping,(
{direction => BOTH, ucs => 0x20AC, code => 0xa2e6, comment => '# EURO SIGN', f => $this_script, l => __LINE__},
{direction => BOTH, ucs => 0x00AE, code => 0xa2e7, comment => '# REGISTERED SIGN', f => $this_script, l => __LINE__ },
{direction => BOTH, ucs => 0x327E, code => 0xa2e8, comment => '# CIRCLED HANGUL IEUNG U', f => $this_script, l => __LINE__ }
);
push @$mapping,
( { direction => BOTH,
ucs => 0x20AC,
code => 0xa2e6,
comment => '# EURO SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x00AE,
code => 0xa2e7,
comment => '# REGISTERED SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x327E,
code => 0xa2e8,
comment => '# CIRCLED HANGUL IEUNG U',
f => $this_script,
l => __LINE__ });
print_conversion_tables($this_script, "EUC_KR", $mapping);

View File

@ -28,8 +28,8 @@ my @extras;
foreach my $i (@$mapping)
{
my $ucs = $i->{ucs};
my $code = $i->{code};
my $ucs = $i->{ucs};
my $code = $i->{code};
my $origcode = $i->{code};
my $plane = ($code & 0x1f0000) >> 16;
@ -52,14 +52,13 @@ foreach my $i (@$mapping)
# Some codes are mapped twice in the EUC_TW to UTF-8 table.
if ($origcode >= 0x12121 && $origcode <= 0x20000)
{
push @extras, {
ucs => $i->{ucs},
code => ($i->{code} + 0x8ea10000),
rest => $i->{rest},
push @extras,
{ ucs => $i->{ucs},
code => ($i->{code} + 0x8ea10000),
rest => $i->{rest},
direction => TO_UNICODE,
f => $i->{f},
l => $i->{l}
};
f => $i->{f},
l => $i->{l} };
}
}

View File

@ -35,13 +35,12 @@ while (<$in>)
my $code = hex($c);
if ($code >= 0x80 && $ucs >= 0x0080)
{
push @mapping, {
ucs => $ucs,
code => $code,
push @mapping,
{ ucs => $ucs,
code => $code,
direction => BOTH,
f => $in_file,
l => $.
};
f => $in_file,
l => $. };
}
}
close($in);

View File

@ -25,10 +25,24 @@ my $this_script = $0;
my $mapping = &read_source("JOHAB.TXT");
# Some extra characters that are not in JOHAB.TXT
push @$mapping, (
{direction => BOTH, ucs => 0x20AC, code => 0xd9e6, comment => '# EURO SIGN', f => $this_script, l => __LINE__ },
{direction => BOTH, ucs => 0x00AE, code => 0xd9e7, comment => '# REGISTERED SIGN', f => $this_script, l => __LINE__ },
{direction => BOTH, ucs => 0x327E, code => 0xd9e8, comment => '# CIRCLED HANGUL IEUNG U', f => $this_script, l => __LINE__ }
);
push @$mapping,
( { direction => BOTH,
ucs => 0x20AC,
code => 0xd9e6,
comment => '# EURO SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x00AE,
code => 0xd9e7,
comment => '# REGISTERED SIGN',
f => $this_script,
l => __LINE__ },
{ direction => BOTH,
ucs => 0x327E,
code => 0xd9e8,
comment => '# CIRCLED HANGUL IEUNG U',
f => $this_script,
l => __LINE__ });
print_conversion_tables($this_script, "JOHAB", $mapping);

View File

@ -24,6 +24,7 @@ while (my $line = <$in>)
{
if ($line =~ /^0x(.*)[ \t]*U\+(.*)\+(.*)[ \t]*#(.*)$/)
{
# combined characters
my ($c, $u1, $u2) = ($1, $2, $3);
my $rest = "U+" . $u1 . "+" . $u2 . $4;
@ -31,18 +32,18 @@ while (my $line = <$in>)
my $ucs1 = hex($u1);
my $ucs2 = hex($u2);
push @mapping, {
code => $code,
ucs => $ucs1,
push @mapping,
{ code => $code,
ucs => $ucs1,
ucs_second => $ucs2,
comment => $rest,
direction => BOTH,
f => $in_file,
l => $.
};
comment => $rest,
direction => BOTH,
f => $in_file,
l => $. };
}
elsif ($line =~ /^0x(.*)[ \t]*U\+(.*)[ \t]*#(.*)$/)
{
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
my $ucs = hex($u);
@ -66,14 +67,13 @@ while (my $line = <$in>)
$direction = BOTH;
}
push @mapping, {
code => $code,
ucs => $ucs,
comment => $rest,
push @mapping,
{ code => $code,
ucs => $ucs,
comment => $rest,
direction => $direction,
f => $in_file,
l => $.
};
f => $in_file,
l => $. };
}
}
close($in);

View File

@ -18,33 +18,71 @@ my $this_script = $0;
my $mapping = read_source("CP932.TXT");
# Drop these SJIS codes from the source for UTF8=>SJIS conversion
my @reject_sjis =(
0xed40..0xeefc, 0x8754..0x875d, 0x878a, 0x8782,
0x8784, 0xfa5b, 0xfa54, 0x8790..0x8792, 0x8795..0x8797,
0x879a..0x879c
);
my @reject_sjis = (
0xed40 .. 0xeefc, 0x8754 .. 0x875d, 0x878a, 0x8782,
0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
0x8795 .. 0x8797, 0x879a .. 0x879c);
foreach my $i (@$mapping)
{
my $code = $i->{code};
my $ucs = $i->{ucs};
my $ucs = $i->{ucs};
if (grep {$code == $_} @reject_sjis)
if (grep { $code == $_ } @reject_sjis)
{
$i->{direction} = TO_UNICODE;
}
}
# Add these UTF8->SJIS pairs to the table.
push @$mapping, (
{direction => FROM_UNICODE, ucs => 0x00a2, code => 0x8191, comment => '# CENT SIGN', f => $this_script, l => __LINE__ },
{direction => FROM_UNICODE, ucs => 0x00a3, code => 0x8192, comment => '# POUND SIGN', f => $this_script, l => __LINE__ },
{direction => FROM_UNICODE, ucs => 0x00a5, code => 0x5c, comment => '# YEN SIGN', f => $this_script, l => __LINE__ },
{direction => FROM_UNICODE, ucs => 0x00ac, code => 0x81ca, comment => '# NOT SIGN', f => $this_script, l => __LINE__ },
{direction => FROM_UNICODE, ucs => 0x2016, code => 0x8161, comment => '# DOUBLE VERTICAL LINE', f => $this_script, l => __LINE__ },
{direction => FROM_UNICODE, ucs => 0x203e, code => 0x7e, comment => '# OVERLINE', f => $this_script, l => __LINE__ },
{direction => FROM_UNICODE, ucs => 0x2212, code => 0x817c, comment => '# MINUS SIGN', f => $this_script, l => __LINE__ },
{direction => FROM_UNICODE, ucs => 0x301c, code => 0x8160, comment => '# WAVE DASH', f => $this_script, l => __LINE__ }
);
push @$mapping,
( { direction => FROM_UNICODE,
ucs => 0x00a2,
code => 0x8191,
comment => '# CENT SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x00a3,
code => 0x8192,
comment => '# POUND SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x00a5,
code => 0x5c,
comment => '# YEN SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x00ac,
code => 0x81ca,
comment => '# NOT SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x2016,
code => 0x8161,
comment => '# DOUBLE VERTICAL LINE',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x203e,
code => 0x7e,
comment => '# OVERLINE',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x2212,
code => 0x817c,
comment => '# MINUS SIGN',
f => $this_script,
l => __LINE__ },
{ direction => FROM_UNICODE,
ucs => 0x301c,
code => 0x8160,
comment => '# WAVE DASH',
f => $this_script,
l => __LINE__ });
print_conversion_tables($this_script, "SJIS", $mapping);

View File

@ -38,18 +38,23 @@ while (<$in>)
if ($code >= 0x80 && $ucs >= 0x0080)
{
push @mapping, {
ucs => $ucs,
code => $code,
push @mapping,
{ ucs => $ucs,
code => $code,
direction => BOTH,
f => $in_file,
l => $.
};
f => $in_file,
l => $. };
}
}
close($in);
# One extra character that's not in the source file.
push @mapping, { direction => BOTH, code => 0xa2e8, ucs => 0x327e, comment => 'CIRCLED HANGUL IEUNG U', f => $this_script, l => __LINE__ };
push @mapping,
{ direction => BOTH,
code => 0xa2e8,
ucs => 0x327e,
comment => 'CIRCLED HANGUL IEUNG U',
f => $this_script,
l => __LINE__ };
print_conversion_tables($this_script, "UHC", \@mapping);

View File

@ -9,15 +9,15 @@ use strict;
use Exporter 'import';
our @EXPORT = qw( NONE TO_UNICODE FROM_UNICODE BOTH read_source print_conversion_tables);
our @EXPORT =
qw( NONE TO_UNICODE FROM_UNICODE BOTH read_source print_conversion_tables);
# Constants used in the 'direction' field of the character maps
use constant {
NONE => 0,
TO_UNICODE => 1,
FROM_UNICODE => 2,
BOTH => 3
};
BOTH => 3 };
#######################################################################
# read_source - common routine to read source file
@ -36,7 +36,7 @@ sub read_source
next if (/^#/);
chop;
next if (/^$/); # Ignore empty lines
next if (/^$/); # Ignore empty lines
next if (/^0x([0-9A-F]+)\s+(#.*)$/);
@ -49,13 +49,13 @@ sub read_source
print STDERR "READ ERROR at line $. in $fname: $_\n";
exit;
}
my $out = {code => hex($1),
ucs => hex($2),
comment => $4,
direction => BOTH,
f => $fname,
l => $.
};
my $out = {
code => hex($1),
ucs => hex($2),
comment => $4,
direction => BOTH,
f => $fname,
l => $. };
# Ignore pure ASCII mappings. PostgreSQL character conversion code
# never even passes these to the conversion code.
@ -92,8 +92,10 @@ sub print_conversion_tables
{
my ($this_script, $csname, $charset) = @_;
print_conversion_tables_direction($this_script, $csname, FROM_UNICODE, $charset);
print_conversion_tables_direction($this_script, $csname, TO_UNICODE, $charset);
print_conversion_tables_direction($this_script, $csname, FROM_UNICODE,
$charset);
print_conversion_tables_direction($this_script, $csname, TO_UNICODE,
$charset);
}
#############################################################################
@ -117,14 +119,14 @@ sub print_conversion_tables_direction
my $tblname;
if ($direction == TO_UNICODE)
{
$fname = lc("${csname}_to_utf8.map");
$fname = lc("${csname}_to_utf8.map");
$tblname = lc("${csname}_to_unicode_tree");
print "- Writing ${csname}=>UTF8 conversion table: $fname\n";
}
else
{
$fname = lc("utf8_to_${csname}.map");
$fname = lc("utf8_to_${csname}.map");
$tblname = lc("${csname}_from_unicode_tree");
print "- Writing UTF8=>${csname} conversion table: $fname\n";
@ -135,24 +137,22 @@ sub print_conversion_tables_direction
print $out "/* src/backend/utils/mb/Unicode/$fname */\n";
print $out "/* This file is generated by $this_script */\n\n";
# Collect regular, non-combined, mappings, and create the radix tree from them.
# Collect regular, non-combined, mappings, and create the radix tree from them.
my $charmap = &make_charmap($out, $charset, $direction, 0);
print_radix_table($out, $tblname, $charmap);
# Collect combined characters, and create combined character table (if any)
# Collect combined characters, and create combined character table (if any)
my $charmap_combined = &make_charmap_combined($charset, $direction);
if (scalar @{$charmap_combined} > 0)
{
if ($direction == TO_UNICODE)
{
print_to_utf8_combined_map($out, $csname,
$charmap_combined, 1);
print_to_utf8_combined_map($out, $csname, $charmap_combined, 1);
}
else
{
print_from_utf8_combined_map($out, $csname,
$charmap_combined, 1);
print_from_utf8_combined_map($out, $csname, $charmap_combined, 1);
}
}
@ -166,14 +166,16 @@ sub print_from_utf8_combined_map
my $last_comment = "";
printf $out "\n/* Combined character map */\n";
printf $out "static const pg_utf_to_local_combined ULmap${charset}_combined[ %d ] = {",
printf $out
"static const pg_utf_to_local_combined ULmap${charset}_combined[ %d ] = {",
scalar(@$table);
my $first = 1;
foreach my $i (sort {$a->{utf8} <=> $b->{utf8}} @$table)
{
foreach my $i (sort { $a->{utf8} <=> $b->{utf8} } @$table)
{
print($out ",") if (!$first);
$first = 0;
print $out "\t/* $last_comment */" if ($verbose && $last_comment ne "");
print $out "\t/* $last_comment */"
if ($verbose && $last_comment ne "");
printf $out "\n {0x%08x, 0x%08x, 0x%04x}",
$i->{utf8}, $i->{utf8_second}, $i->{code};
@ -198,15 +200,17 @@ sub print_to_utf8_combined_map
my $last_comment = "";
printf $out "\n/* Combined character map */\n";
printf $out "static const pg_local_to_utf_combined LUmap${charset}_combined[ %d ] = {",
printf $out
"static const pg_local_to_utf_combined LUmap${charset}_combined[ %d ] = {",
scalar(@$table);
my $first = 1;
foreach my $i (sort {$a->{code} <=> $b->{code}} @$table)
{
foreach my $i (sort { $a->{code} <=> $b->{code} } @$table)
{
print($out ",") if (!$first);
$first = 0;
print $out "\t/* $last_comment */" if ($verbose && $last_comment ne "");
print $out "\t/* $last_comment */"
if ($verbose && $last_comment ne "");
printf $out "\n {0x%04x, 0x%08x, 0x%08x}",
$i->{code}, $i->{utf8}, $i->{utf8_second};
@ -214,7 +218,7 @@ sub print_to_utf8_combined_map
if ($verbose >= 2)
{
$last_comment =
sprintf("%s:%d %s", $i->{f}, $i->{l}, $i->{comment});
sprintf("%s:%d %s", $i->{f}, $i->{l}, $i->{comment});
}
elsif ($verbose >= 1)
{
@ -255,25 +259,25 @@ sub print_radix_table
}
elsif ($in < 0x10000)
{
my $b1 = $in >> 8;
my $b2 = $in & 0xff;
my $b1 = $in >> 8;
my $b2 = $in & 0xff;
$b2map{$b1}{$b2} = $out;
}
elsif ($in < 0x1000000)
{
my $b1 = $in >> 16;
my $b2 = ($in >> 8) & 0xff;
my $b3 = $in & 0xff;
my $b1 = $in >> 16;
my $b2 = ($in >> 8) & 0xff;
my $b3 = $in & 0xff;
$b3map{$b1}{$b2}{$b3} = $out;
}
elsif ($in < 0x100000000)
{
my $b1 = $in >> 24;
my $b2 = ($in >> 16) & 0xff;
my $b3 = ($in >> 8) & 0xff;
my $b4 = $in & 0xff;
my $b1 = $in >> 24;
my $b2 = ($in >> 16) & 0xff;
my $b3 = ($in >> 8) & 0xff;
my $b4 = $in & 0xff;
$b4map{$b1}{$b2}{$b3}{$b4} = $out;
}
@ -309,10 +313,14 @@ sub print_radix_table
###
# Add the segments for the radix trees themselves.
push @segments, build_segments_from_tree("Single byte table", "1-byte", 1, \%b1map);
push @segments, build_segments_from_tree("Two byte table", "2-byte", 2, \%b2map);
push @segments, build_segments_from_tree("Three byte table", "3-byte", 3, \%b3map);
push @segments, build_segments_from_tree("Four byte table", "4-byte", 4, \%b4map);
push @segments,
build_segments_from_tree("Single byte table", "1-byte", 1, \%b1map);
push @segments,
build_segments_from_tree("Two byte table", "2-byte", 2, \%b2map);
push @segments,
build_segments_from_tree("Three byte table", "3-byte", 3, \%b3map);
push @segments,
build_segments_from_tree("Four byte table", "4-byte", 4, \%b4map);
###
### Find min and max index used in each level of each tree.
@ -325,23 +333,24 @@ sub print_radix_table
my %max_idx;
foreach my $seg (@segments)
{
my $this_min = $min_idx{$seg->{depth}}->{$seg->{level}};
my $this_max = $max_idx{$seg->{depth}}->{$seg->{level}};
my $this_min = $min_idx{ $seg->{depth} }->{ $seg->{level} };
my $this_max = $max_idx{ $seg->{depth} }->{ $seg->{level} };
foreach my $i (keys %{$seg->{values}})
foreach my $i (keys %{ $seg->{values} })
{
$this_min = $i if (!defined $this_min || $i < $this_min);
$this_max = $i if (!defined $this_max || $i > $this_max);
}
$min_idx{$seg->{depth}}{$seg->{level}} = $this_min;
$max_idx{$seg->{depth}}{$seg->{level}} = $this_max;
$min_idx{ $seg->{depth} }{ $seg->{level} } = $this_min;
$max_idx{ $seg->{depth} }{ $seg->{level} } = $this_max;
}
# Copy the mins and max's back to every segment, for convenience.
foreach my $seg (@segments)
{
$seg->{min_idx} = $min_idx{$seg->{depth}}{$seg->{level}};
$seg->{max_idx} = $max_idx{$seg->{depth}}{$seg->{level}};
$seg->{min_idx} = $min_idx{ $seg->{depth} }{ $seg->{level} };
$seg->{max_idx} = $max_idx{ $seg->{depth} }{ $seg->{level} };
}
###
@ -359,11 +368,10 @@ sub print_radix_table
$widest_range = $this_range if ($this_range > $widest_range);
}
unshift @segments, {
header => "Dummy map, for invalid values",
unshift @segments,
{ header => "Dummy map, for invalid values",
min_idx => 0,
max_idx => $widest_range
};
max_idx => $widest_range };
###
### Eliminate overlapping zeros
@ -378,26 +386,34 @@ sub print_radix_table
###
for (my $j = 0; $j < $#segments - 1; $j++)
{
my $seg = $segments[$j];
my $nextseg = $segments[$j + 1];
my $seg = $segments[$j];
my $nextseg = $segments[ $j + 1 ];
# Count the number of zero values at the end of this segment.
my $this_trail_zeros = 0;
for (my $i = $seg->{max_idx}; $i >= $seg->{min_idx} && !$seg->{values}->{$i}; $i--)
for (
my $i = $seg->{max_idx};
$i >= $seg->{min_idx} && !$seg->{values}->{$i};
$i--)
{
$this_trail_zeros++;
}
# Count the number of zeros at the beginning of next segment.
my $next_lead_zeros = 0;
for (my $i = $nextseg->{min_idx}; $i <= $nextseg->{max_idx} && !$nextseg->{values}->{$i}; $i++)
for (
my $i = $nextseg->{min_idx};
$i <= $nextseg->{max_idx} && !$nextseg->{values}->{$i};
$i++)
{
$next_lead_zeros++;
}
# How many zeros in common?
my $overlaid_trail_zeros =
($this_trail_zeros > $next_lead_zeros) ? $next_lead_zeros : $this_trail_zeros;
($this_trail_zeros > $next_lead_zeros)
? $next_lead_zeros
: $this_trail_zeros;
$seg->{overlaid_trail_zeros} = $overlaid_trail_zeros;
$seg->{max_idx} = $seg->{max_idx} - $overlaid_trail_zeros;
@ -419,7 +435,7 @@ sub print_radix_table
foreach my $seg (@segments)
{
$seg->{offset} = $flatoff;
$segmap{$seg->{label}} = $flatoff;
$segmap{ $seg->{label} } = $flatoff;
$flatoff += $seg->{max_idx} - $seg->{min_idx} + 1;
}
my $tblsize = $flatoff;
@ -427,9 +443,9 @@ sub print_radix_table
# Second pass: look up the offset of each label reference in the hash.
foreach my $seg (@segments)
{
while (my ($i, $val) = each %{$seg->{values}})
while (my ($i, $val) = each %{ $seg->{values} })
{
if (!($val =~ /^[0-9,.E]+$/ ))
if (!($val =~ /^[0-9,.E]+$/))
{
my $segoff = $segmap{$val};
if ($segoff)
@ -482,7 +498,7 @@ sub print_radix_table
my $max_val = 0;
foreach my $seg (@segments)
{
foreach my $val (values %{$seg->{values}})
foreach my $val (values %{ $seg->{values} })
{
$max_val = $val if ($val > $max_val);
}
@ -498,17 +514,17 @@ sub print_radix_table
if ($max_val <= 0xffff)
{
$vals_per_line = 8;
$colwidth = 4;
$colwidth = 4;
}
elsif ($max_val <= 0xffffff)
{
$vals_per_line = 4;
$colwidth = 6;
$colwidth = 6;
}
else
{
$vals_per_line = 4;
$colwidth = 8;
$colwidth = 8;
}
###
@ -529,17 +545,20 @@ sub print_radix_table
print $out " ${tblname}_table,\n";
}
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 1-byte inputs */\n", $b1root;
printf $out " 0x%04x, /* offset of table for 1-byte inputs */\n",
$b1root;
printf $out " 0x%02x, /* b1_lower */\n", $b1_lower;
printf $out " 0x%02x, /* b1_upper */\n", $b1_upper;
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 2-byte inputs */\n", $b2root;
printf $out " 0x%04x, /* offset of table for 2-byte inputs */\n",
$b2root;
printf $out " 0x%02x, /* b2_1_lower */\n", $b2_1_lower;
printf $out " 0x%02x, /* b2_1_upper */\n", $b2_1_upper;
printf $out " 0x%02x, /* b2_2_lower */\n", $b2_2_lower;
printf $out " 0x%02x, /* b2_2_upper */\n", $b2_2_upper;
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n", $b3root;
printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n",
$b3root;
printf $out " 0x%02x, /* b3_1_lower */\n", $b3_1_lower;
printf $out " 0x%02x, /* b3_1_upper */\n", $b3_1_upper;
printf $out " 0x%02x, /* b3_2_lower */\n", $b3_2_lower;
@ -547,7 +566,8 @@ sub print_radix_table
printf $out " 0x%02x, /* b3_3_lower */\n", $b3_3_lower;
printf $out " 0x%02x, /* b3_3_upper */\n", $b3_3_upper;
printf $out "\n";
printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n", $b4root;
printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n",
$b4root;
printf $out " 0x%02x, /* b4_1_lower */\n", $b4_1_lower;
printf $out " 0x%02x, /* b4_1_upper */\n", $b4_1_upper;
printf $out " 0x%02x, /* b4_2_lower */\n", $b4_2_lower;
@ -561,18 +581,21 @@ sub print_radix_table
print $out "static const $datatype ${tblname}_table[$tblsize] =\n";
print $out "{";
my $off = 0;
foreach my $seg (@segments)
{
printf $out "\n";
printf $out " /*** %s - offset 0x%05x ***/\n", $seg->{header}, $off;
printf $out "\n";
for (my $i=$seg->{min_idx}; $i <= $seg->{max_idx};)
for (my $i = $seg->{min_idx}; $i <= $seg->{max_idx};)
{
# Print the next line's worth of values.
# XXX pad to begin at a nice boundary
printf $out " /* %02x */ ", $i;
for (my $j = 0; $j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
for (my $j = 0;
$j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
{
my $val = $seg->{values}->{$i};
@ -588,7 +611,8 @@ sub print_radix_table
}
if ($seg->{overlaid_trail_zeros})
{
printf $out " /* $seg->{overlaid_trail_zeros} trailing zero values shared with next segment */\n";
printf $out
" /* $seg->{overlaid_trail_zeros} trailing zero values shared with next segment */\n";
}
}
@ -607,13 +631,14 @@ sub build_segments_from_tree
if (%{$map})
{
@segments = build_segments_recurse($header, $rootlabel, "", 1, $depth, $map);
@segments =
build_segments_recurse($header, $rootlabel, "", 1, $depth, $map);
# Sort the segments into "breadth-first" order. Not strictly required,
# but makes the maps nicer to read.
@segments = sort { $a->{level} cmp $b->{level} or
$a->{path} cmp $b->{path}}
@segments;
@segments =
sort { $a->{level} cmp $b->{level} or $a->{path} cmp $b->{path} }
@segments;
}
return @segments;
@ -628,14 +653,13 @@ sub build_segments_recurse
if ($level == $depth)
{
push @segments, {
header => $header . ", leaf: ${path}xx",
label => $label,
level => $level,
depth => $depth,
path => $path,
values => $map
};
push @segments,
{ header => $header . ", leaf: ${path}xx",
label => $label,
level => $level,
depth => $depth,
path => $path,
values => $map };
}
else
{
@ -646,19 +670,19 @@ sub build_segments_recurse
my $childpath = $path . sprintf("%02x", $i);
my $childlabel = "$depth-level-$level-$childpath";
push @segments, build_segments_recurse($header, $childlabel, $childpath,
$level + 1, $depth, $val);
push @segments,
build_segments_recurse($header, $childlabel, $childpath,
$level + 1, $depth, $val);
$children{$i} = $childlabel;
}
push @segments, {
header => $header . ", byte #$level: ${path}xx",
label => $label,
level => $level,
depth => $depth,
path => $path,
values => \%children
};
push @segments,
{ header => $header . ", byte #$level: ${path}xx",
label => $label,
level => $level,
depth => $depth,
path => $path,
values => \%children };
}
return @segments;
}
@ -688,29 +712,31 @@ sub make_charmap
my %charmap;
foreach my $c (@$charset)
{
# combined characters are handled elsewhere
next if (defined $c->{ucs_second});
next if ($c->{direction} != $direction && $c->{direction} != BOTH);
my ($src, $dst) =
$direction == TO_UNICODE
? ($c->{code}, ucs2utf($c->{ucs}))
: (ucs2utf($c->{ucs}), $c->{code});
$direction == TO_UNICODE
? ($c->{code}, ucs2utf($c->{ucs}))
: (ucs2utf($c->{ucs}), $c->{code});
# check for duplicate source codes
if (defined $charmap{$src})
{
printf STDERR
"Error: duplicate source code on %s:%d: 0x%04x => 0x%04x, 0x%04x\n",
$c->{f}, $c->{l}, $src, $charmap{$src}, $dst;
"Error: duplicate source code on %s:%d: 0x%04x => 0x%04x, 0x%04x\n",
$c->{f}, $c->{l}, $src, $charmap{$src}, $dst;
exit;
}
$charmap{$src} = $dst;
if ($verbose)
{
printf $out "0x%04x 0x%04x %s:%d %s\n", $src, $dst, $c->{f}, $c->{l}, $c->{comment};
printf $out "0x%04x 0x%04x %s:%d %s\n", $src, $dst, $c->{f},
$c->{l}, $c->{comment};
}
}
if ($verbose)
@ -743,11 +769,13 @@ sub make_charmap_combined
if (defined $c->{ucs_second})
{
my $entry = {utf8 => ucs2utf($c->{ucs}),
utf8_second => ucs2utf($c->{ucs_second}),
code => $c->{code},
comment => $c->{comment},
f => $c->{f}, l => $c->{l}};
my $entry = {
utf8 => ucs2utf($c->{ucs}),
utf8_second => ucs2utf($c->{ucs_second}),
code => $c->{code},
comment => $c->{comment},
f => $c->{f},
l => $c->{l} };
push @combined, $entry;
}
}

View File

@ -38,6 +38,7 @@ mkdir $datadir;
# make sure we run one successful test without a TZ setting so we test
# initdb's time zone setting code
{
# delete local only works from perl 5.12, so use the older way to do this
local (%ENV) = %ENV;
delete $ENV{TZ};

View File

@ -37,10 +37,9 @@ $node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup" ],
'pg_basebackup fails because of WAL configuration');
ok(! -d "$tempdir/backup", 'backup directory was cleaned up');
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ],
$node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ],
'failing run with no-clean option');
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
@ -53,7 +52,9 @@ close $conf;
$node->restart;
# Write some files to test that they are not copied.
foreach my $filename (qw(backup_label tablespace_map postgresql.auto.conf.tmp current_logfiles.tmp))
foreach my $filename (
qw(backup_label tablespace_map postgresql.auto.conf.tmp current_logfiles.tmp)
)
{
open my $file, '>>', "$pgdata/$filename";
print $file "DONOTCOPY";
@ -71,7 +72,9 @@ is_deeply(
'no WAL files copied');
# Contents of these directories should not be copied.
foreach my $dirname (qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots pg_stat_tmp pg_subtrans))
foreach my $dirname (
qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots pg_stat_tmp pg_subtrans)
)
{
is_deeply(
[ sort(slurp_dir("$tempdir/backup/$dirname/")) ],
@ -80,14 +83,16 @@ foreach my $dirname (qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots
}
# These files should not be copied.
foreach my $filename (qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp))
foreach my $filename (
qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp)
)
{
ok(! -f "$tempdir/backup/$filename", "$filename not copied");
ok(!-f "$tempdir/backup/$filename", "$filename not copied");
}
# Make sure existing backup_label was ignored.
isnt(slurp_file("$tempdir/backup/backup_label"), 'DONOTCOPY',
'existing backup_label not copied');
isnt(slurp_file("$tempdir/backup/backup_label"),
'DONOTCOPY', 'existing backup_label not copied');
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backup2", '--waldir',
@ -124,7 +129,8 @@ $node->command_fails(
my $superlongname = "superlongname_" . ("x" x 100);
my $superlongpath = "$pgdata/$superlongname";
open my $file, '>', "$superlongpath" or die "unable to create file $superlongpath";
open my $file, '>', "$superlongpath"
or die "unable to create file $superlongpath";
close $file;
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '-Ft' ],
@ -141,9 +147,9 @@ SKIP:
$node->stop;
rename("$pgdata/pg_replslot", "$tempdir/pg_replslot")
or BAIL_OUT "could not move $pgdata/pg_replslot";
or BAIL_OUT "could not move $pgdata/pg_replslot";
symlink("$tempdir/pg_replslot", "$pgdata/pg_replslot")
or BAIL_OUT "could not symlink to $pgdata/pg_replslot";
or BAIL_OUT "could not symlink to $pgdata/pg_replslot";
$node->start;
@ -183,7 +189,8 @@ SKIP:
"tablespace symlink was updated");
closedir $dh;
ok(-d "$tempdir/backup1/pg_replslot", 'pg_replslot symlink copied as directory');
ok( -d "$tempdir/backup1/pg_replslot",
'pg_replslot symlink copied as directory');
mkdir "$tempdir/tbl=spc2";
$node->safe_psql('postgres', "DROP TABLE test1;");
@ -222,7 +229,8 @@ like(
qr/^primary_conninfo = '.*port=$port.*'\n/m,
'recovery.conf sets primary_conninfo');
$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backupxd" ],
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backupxd" ],
'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
'WAL files copied');
@ -242,7 +250,9 @@ $node->command_ok(
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backupnoslot", '-X', 'stream', '--no-slot' ],
[ 'pg_basebackup', '-D',
"$tempdir/backupnoslot", '-X',
'stream', '--no-slot' ],
'pg_basebackup -X stream runs with --no-slot');
$node->command_fails(

View File

@ -12,7 +12,8 @@ my $node = get_new_node('main');
# Initialize node without replication settings
$node->init(allows_streaming => 1, has_archiving => 1);
$node->append_conf('postgresql.conf', q{
$node->append_conf(
'postgresql.conf', q{
wal_level = 'logical'
max_replication_slots = 4
max_wal_senders = 4
@ -22,25 +23,34 @@ log_error_verbosity = verbose
$node->dump_info;
$node->start;
$node->command_fails(['pg_recvlogical'],
'pg_recvlogical needs a slot name');
$node->command_fails(['pg_recvlogical', '-S', 'test'],
$node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name');
$node->command_fails([ 'pg_recvlogical', '-S', 'test' ],
'pg_recvlogical needs a database');
$node->command_fails(['pg_recvlogical', '-S', 'test', '-d', 'postgres'],
$node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
'pg_recvlogical needs an action');
$node->command_fails(['pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), '--start'],
$node->command_fails(
[ 'pg_recvlogical', '-S',
'test', '-d',
$node->connstr('postgres'), '--start' ],
'no destination file');
$node->command_ok(['pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), '--create-slot'],
$node->command_ok(
[ 'pg_recvlogical', '-S',
'test', '-d',
$node->connstr('postgres'), '--create-slot' ],
'slot created');
my $slot = $node->slot('test');
isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
$node->psql('postgres', 'CREATE TABLE test_table(x integer)');
$node->psql('postgres', 'INSERT INTO test_table(x) SELECT y FROM generate_series(1, 10) a(y);');
my $nextlsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
$node->psql('postgres',
'INSERT INTO test_table(x) SELECT y FROM generate_series(1, 10) a(y);');
my $nextlsn =
$node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
chomp($nextlsn);
$node->command_ok(['pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'],
$node->command_ok(
[ 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
'--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-' ],
'replayed a transaction');

View File

@ -22,7 +22,7 @@ command_ok([ $ENV{PG_REGRESS}, '--config-auth', "$tempdir/data" ],
'configure authentication');
open my $conf, '>>', "$tempdir/data/postgresql.conf";
print $conf "fsync = off\n";
if (! $windows_os)
if (!$windows_os)
{
print $conf "listen_addresses = ''\n";
print $conf "unix_socket_directories = '$tempdir_short'\n";
@ -32,8 +32,7 @@ else
print $conf "listen_addresses = '127.0.0.1'\n";
}
close $conf;
command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
'pg_ctl start');
command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ], 'pg_ctl start');
# sleep here is because Windows builds can't check postmaster.pid exactly,
# so they may mistake a pre-existing postmaster.pid for one created by the
@ -42,12 +41,12 @@ command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
sleep 3 if ($windows_os);
command_fails([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
'second pg_ctl start fails');
command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
'pg_ctl stop');
command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], 'pg_ctl stop');
command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
'second pg_ctl stop fails');
command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
command_ok(
[ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
'pg_ctl restart with server not running');
command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
'pg_ctl restart with server running');


@ -7,49 +7,55 @@ use Test::More tests => 12;
my $tempdir = TestLib::tempdir;
command_fails_like([ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
qr/directory .* does not exist/,
'pg_ctl promote with nonexistent directory');
command_fails_like(
[ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
qr/directory .* does not exist/,
'pg_ctl promote with nonexistent directory');
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
command_fails_like([ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
qr/PID file .* does not exist/,
'pg_ctl promote of not running instance fails');
command_fails_like(
[ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
qr/PID file .* does not exist/,
'pg_ctl promote of not running instance fails');
$node_primary->start;
command_fails_like([ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
qr/not in standby mode/,
'pg_ctl promote of primary instance fails');
command_fails_like(
[ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
qr/not in standby mode/,
'pg_ctl promote of primary instance fails');
my $node_standby = get_new_node('standby');
$node_primary->backup('my_backup');
$node_standby->init_from_backup($node_primary, 'my_backup', has_streaming => 1);
$node_standby->init_from_backup($node_primary, 'my_backup',
has_streaming => 1);
$node_standby->start;
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
't', 'standby is in recovery');
't', 'standby is in recovery');
command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ],
'pg_ctl -W promote of standby runs');
'pg_ctl -W promote of standby runs');
ok($node_standby->poll_query_until('postgres', 'SELECT NOT pg_is_in_recovery()'),
'promoted standby is not in recovery');
ok( $node_standby->poll_query_until(
'postgres', 'SELECT NOT pg_is_in_recovery()'),
'promoted standby is not in recovery');
# same again with default wait option
$node_standby = get_new_node('standby2');
$node_standby->init_from_backup($node_primary, 'my_backup', has_streaming => 1);
$node_standby->init_from_backup($node_primary, 'my_backup',
has_streaming => 1);
$node_standby->start;
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
't', 'standby is in recovery');
't', 'standby is in recovery');
command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, 'promote' ],
'pg_ctl promote of standby runs');
'pg_ctl promote of standby runs');
# no wait here
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
'f', 'promoted standby is not in recovery');
'f', 'promoted standby is not in recovery');

File diff suppressed because it is too large


@ -91,8 +91,8 @@ $node->safe_psql($dbname1, 'CREATE TABLE t0()');
# XXX no printed message when this fails, just SIGPIPE termination
$node->command_ok(
[ 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt,
'-U', $dbname1, $node->connstr($dbname1) ],
[ 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt, '-U', $dbname1,
$node->connstr($dbname1) ],
'parallel dump');
# recreate $dbname1 for restore test


@ -30,4 +30,4 @@ $node->issues_sql_like(
'cluster specific table');
$node->command_ok([qw(clusterdb --echo --verbose dbname=template1)],
'clusterdb with connection string');
'clusterdb with connection string');


@ -44,8 +44,10 @@ $node->issues_sql_like(
'reindex with verbose output');
$node->command_ok([qw(reindexdb --echo --table=pg_am dbname=template1)],
'reindexdb table with connection string');
$node->command_ok([qw(reindexdb --echo dbname=template1)],
'reindexdb database with connection string');
$node->command_ok([qw(reindexdb --echo --system dbname=template1)],
'reindexdb system with connection string');
'reindexdb table with connection string');
$node->command_ok(
[qw(reindexdb --echo dbname=template1)],
'reindexdb database with connection string');
$node->command_ok(
[qw(reindexdb --echo --system dbname=template1)],
'reindexdb system with connection string');


@ -34,4 +34,4 @@ $node->issues_sql_like(
qr/statement: ANALYZE;/,
'vacuumdb -Z');
$node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)],
'vacuumdb with connection string');
'vacuumdb with connection string');


@ -11,28 +11,31 @@ use Test::More tests => 3;
# interpret everything as UTF8. We're going to use byte sequences
# that aren't valid UTF-8 strings, so that would fail. Use LATIN1,
# which accepts any byte and has a conversion from each byte to UTF-8.
$ENV{LC_ALL} = 'C';
$ENV{LC_ALL} = 'C';
$ENV{PGCLIENTENCODING} = 'LATIN1';
# Create database names covering the range of LATIN1 characters and
# run the utilities' --all options over them.
my $dbname1 = generate_ascii_string(1, 63); # contains '='
my $dbname2 = generate_ascii_string(67, 129); # skip 64-66 to keep length to 62
my $dbname1 = generate_ascii_string(1, 63); # contains '='
my $dbname2 =
generate_ascii_string(67, 129); # skip 64-66 to keep length to 62
my $dbname3 = generate_ascii_string(130, 192);
my $dbname4 = generate_ascii_string(193, 255);
my $node = get_new_node('main');
$node->init(extra => ['--locale=C', '--encoding=LATIN1']);
$node->init(extra => [ '--locale=C', '--encoding=LATIN1' ]);
$node->start;
foreach my $dbname ($dbname1, $dbname2, $dbname3, $dbname4, 'CamelCase')
{
$node->run_log(['createdb', $dbname]);
$node->run_log([ 'createdb', $dbname ]);
}
$node->command_ok([qw(vacuumdb --all --echo --analyze-only)],
'vacuumdb --all with unusual database names');
$node->command_ok(
[qw(vacuumdb --all --echo --analyze-only)],
'vacuumdb --all with unusual database names');
$node->command_ok([qw(reindexdb --all --echo)],
'reindexdb --all with unusual database names');
$node->command_ok([qw(clusterdb --all --echo --verbose)],
'clusterdb --all with unusual database names');
'reindexdb --all with unusual database names');
$node->command_ok(
[qw(clusterdb --all --echo --verbose)],
'clusterdb --all with unusual database names');


@ -32,9 +32,11 @@ close $FH;
# and character decomposition mapping
my @characters = ();
my %character_hash = ();
open($FH, '<', "UnicodeData.txt") or die "Could not open UnicodeData.txt: $!.";
open($FH, '<', "UnicodeData.txt")
or die "Could not open UnicodeData.txt: $!.";
while (my $line = <$FH>)
{
# Split the line wanted and get the fields needed:
# - Unicode code value
# - Canonical Combining Class
@ -141,6 +143,7 @@ foreach my $char (@characters)
if ($decomp_size == 2)
{
# Should this be used for recomposition?
if ($compat)
{
@ -173,6 +176,7 @@ foreach my $char (@characters)
}
elsif ($decomp_size == 1 && length($first_decomp) <= 4)
{
# The decomposition consists of a single codepoint, and it fits
# in a uint16, so we can store it "inline" in the main table.
$flags .= " | DECOMP_INLINE";
@ -201,6 +205,7 @@ foreach my $char (@characters)
print $OUTPUT "," unless ($code eq $last_code);
if ($comment ne "")
{
# If the line is wide already, indent the comment with one tab,
# otherwise with two. This is to make the output match the way
# pgindent would mangle it. (This is quite hacky. To do this


@ -35,7 +35,8 @@ while (<$regress_in_fh>)
}
# restore STDOUT/ERR so we can print the outcome to the user
open(STDERR, ">&", $olderr_fh) or die; # can't complain as STDERR is still duped
open(STDERR, ">&", $olderr_fh)
or die; # can't complain as STDERR is still duped
open(STDOUT, ">&", $oldout_fh) or die "can't restore STDOUT: $!";
# just in case


@ -52,7 +52,8 @@ sub ::encode_array_constructor
{
package PostgreSQL::InServer; ## no critic (RequireFilenameMatchesPackage);
package PostgreSQL::InServer
; ## no critic (RequireFilenameMatchesPackage);
use strict;
use warnings;


@ -1,6 +1,7 @@
# src/pl/plperl/plc_trusted.pl
package PostgreSQL::InServer::safe; ## no critic (RequireFilenameMatchesPackage);
package PostgreSQL::InServer::safe
; ## no critic (RequireFilenameMatchesPackage);
# Load widely useful pragmas into plperl to make them available.
#


@ -11,7 +11,7 @@ use warnings;
use PostgresNode;
use TestLib;
use Test::More;
if ($windows_os)
if ($windows_os)
{
plan skip_all => "authentication tests cannot run on Windows";
}
@ -25,7 +25,7 @@ else
# and then execute a reload to refresh it.
sub reset_pg_hba
{
my $node = shift;
my $node = shift;
my $hba_method = shift;
unlink($node->data_dir . '/pg_hba.conf');
@ -36,17 +36,18 @@ sub reset_pg_hba
# Test access for a single role, useful to wrap all tests into one.
sub test_role
{
my $node = shift;
my $role = shift;
my $method = shift;
my $expected_res = shift;
my $node = shift;
my $role = shift;
my $method = shift;
my $expected_res = shift;
my $status_string = 'failed';
$status_string = 'success' if ($expected_res eq 0);
my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]);
my $res =
$node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res,
"authentication $status_string for method $method, role $role");
"authentication $status_string for method $method, role $role");
}
# Initialize master node
@ -56,27 +57,30 @@ $node->start;
# Create 3 roles with different password methods for each one. The same
# password is used for all of them.
$node->safe_psql('postgres', "SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';");
$node->safe_psql('postgres', "SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");
$node->safe_psql('postgres',
"SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';"
);
$node->safe_psql('postgres',
"SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");
$ENV{"PGPASSWORD"} = 'pass';
# For "trust" method, all users should be able to connect.
reset_pg_hba($node, 'trust');
test_role($node, 'scram_role', 'trust', 0);
test_role($node, 'md5_role', 'trust', 0);
test_role($node, 'md5_role', 'trust', 0);
# For plain "password" method, all users should also be able to connect.
reset_pg_hba($node, 'password');
test_role($node, 'scram_role', 'password', 0);
test_role($node, 'md5_role', 'password', 0);
test_role($node, 'md5_role', 'password', 0);
# For "scram-sha-256" method, user "scram_role" should be able to connect.
reset_pg_hba($node, 'scram-sha-256');
test_role($node, 'scram_role', 'scram-sha-256', 0);
test_role($node, 'md5_role', 'scram-sha-256', 2);
test_role($node, 'md5_role', 'scram-sha-256', 2);
# For "md5" method, all users should be able to connect (SCRAM
# authentication will be performed for the user with a scram verifier.)
reset_pg_hba($node, 'md5');
test_role($node, 'scram_role', 'md5', 0);
test_role($node, 'md5_role', 'md5', 0);
test_role($node, 'md5_role', 'md5', 0);
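
The hunks above truncate reset_pg_hba() right after the unlink, so here is a hedged reconstruction of the rest of the helper together with a tiny standalone use of it. The single "local all all <method>" entry is an assumption made for illustration (and, as this test already notes, "local" lines mean non-Windows only); data_dir(), append_conf() and reload() are the PostgresNode methods used throughout these files, and the node name is invented:

use strict;
use warnings;
use PostgresNode;
use Test::More tests => 1;

# Hedged sketch of the truncated helper: wipe pg_hba.conf, write a single
# entry with the requested auth method, and reload so it takes effect.
# (The real helper's entry may differ; "local" lines are non-Windows only.)
sub reset_pg_hba
{
	my $node       = shift;
	my $hba_method = shift;

	unlink($node->data_dir . '/pg_hba.conf');
	$node->append_conf('pg_hba.conf', "local all all $hba_method");
	$node->reload;
}

my $node = get_new_node('hba_sketch');
$node->init;
$node->start;

# Require SCRAM for local connections from here on.
reset_pg_hba($node, 'scram-sha-256');
ok(-s $node->data_dir . '/pg_hba.conf', 'pg_hba.conf was rewritten');

$node->stop;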


@ -8,7 +8,7 @@ use warnings;
use PostgresNode;
use TestLib;
use Test::More;
if ($windows_os)
if ($windows_os)
{
plan skip_all => "authentication tests cannot run on Windows";
}
@ -21,7 +21,7 @@ else
# and then execute a reload to refresh it.
sub reset_pg_hba
{
my $node = shift;
my $node = shift;
my $hba_method = shift;
unlink($node->data_dir . '/pg_hba.conf');
@ -32,24 +32,26 @@ sub reset_pg_hba
# Test access for a single role, useful to wrap all tests into one.
sub test_login
{
my $node = shift;
my $role = shift;
my $password = shift;
my $expected_res = shift;
my $node = shift;
my $role = shift;
my $password = shift;
my $expected_res = shift;
my $status_string = 'failed';
$status_string = 'success' if ($expected_res eq 0);
$ENV{"PGPASSWORD"} = $password;
my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]);
my $res =
$node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res,
"authentication $status_string for role $role with password $password");
"authentication $status_string for role $role with password $password"
);
}
# Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII
# characters in the passwords below.
my $node = get_new_node('master');
$node->init(extra => ['--locale=C', '--encoding=UTF8']);
$node->init(extra => [ '--locale=C', '--encoding=UTF8' ]);
$node->start;
# These tests are based on the example strings from RFC4013.txt,
@ -66,8 +68,9 @@ $node->start;
# 7 <U+0627><U+0031> Error - bidirectional check
# Create test roles.
$node->safe_psql('postgres',
"SET password_encryption='scram-sha-256';
$node->safe_psql(
'postgres',
"SET password_encryption='scram-sha-256';
SET client_encoding='utf8';
CREATE ROLE saslpreptest1_role LOGIN PASSWORD 'IX';
CREATE ROLE saslpreptest4a_role LOGIN PASSWORD 'a';
@ -80,23 +83,23 @@ CREATE ROLE saslpreptest7_role LOGIN PASSWORD E'foo\\u0627\\u0031bar';
reset_pg_hba($node, 'scram-sha-256');
# Check that #1 and #5 are treated the same as just 'IX'
test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0);
test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0);
test_login($node, 'saslpreptest1_role', "\xe2\x85\xa8", 0);
# but different from lower case 'ix'
test_login($node, 'saslpreptest1_role', "ix", 2);
# Check #4
test_login($node, 'saslpreptest4a_role', "a", 0);
test_login($node, 'saslpreptest4a_role', "a", 0);
test_login($node, 'saslpreptest4a_role', "\xc2\xaa", 0);
test_login($node, 'saslpreptest4b_role', "a", 0);
test_login($node, 'saslpreptest4b_role', "a", 0);
test_login($node, 'saslpreptest4b_role', "\xc2\xaa", 0);
# Check #6 and #7 - In PostgreSQL, contrary to the spec, if the password
# contains prohibited characters, we use it as is, without normalization.
test_login($node, 'saslpreptest6_role', "foo\x07bar", 0);
test_login($node, 'saslpreptest6_role', "foobar", 2);
test_login($node, 'saslpreptest6_role', "foobar", 2);
test_login($node, 'saslpreptest7_role', "foo\xd8\xa71bar", 0);
test_login($node, 'saslpreptest7_role', "foo1\xd8\xa7bar", 2);
test_login($node, 'saslpreptest7_role', "foobar", 2);
test_login($node, 'saslpreptest7_role', "foobar", 2);


@ -44,8 +44,7 @@ is($master_ts, $standby_ts, "standby gives same value as master");
$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$master->restart;
$master->safe_psql('postgres', 'checkpoint');
$master_lsn =
$master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$standby->poll_query_until('postgres',
qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
or die "slave never caught up";


@ -22,12 +22,12 @@ like(
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]);
is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
is($stdout, '', 'timestamp of BootstrapTransactionId is null');
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]);
is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
is($stdout, '', 'timestamp of FrozenTransactionId is null');
# Since FirstNormalTransactionId will've occurred during initdb, long before we


@ -41,12 +41,9 @@ my $tempdir_short = TestLib::tempdir_short;
my %pgdump_runs = (
binary_upgrade => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/binary_upgrade.sql",
'--schema-only',
'--binary-upgrade',
'--dbname=postgres', ], },
'pg_dump', '--no-sync',
"--file=$tempdir/binary_upgrade.sql", '--schema-only',
'--binary-upgrade', '--dbname=postgres', ], },
clean => {
dump_cmd => [
'pg_dump', "--file=$tempdir/clean.sql",
@ -63,19 +60,16 @@ my %pgdump_runs = (
'postgres', ], },
column_inserts => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/column_inserts.sql",
'-a',
'--column-inserts',
'postgres', ], },
'pg_dump', '--no-sync',
"--file=$tempdir/column_inserts.sql", '-a',
'--column-inserts', 'postgres', ], },
createdb => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/createdb.sql",
'-C',
'-R', # no-op, just for testing
'-R', # no-op, just for testing
'postgres', ], },
data_only => {
dump_cmd => [
@ -83,7 +77,7 @@ my %pgdump_runs = (
'--no-sync',
"--file=$tempdir/data_only.sql",
'-a',
'-v', # no-op, just make sure it works
'-v', # no-op, just make sure it works
'postgres', ], },
defaults => {
dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ],
@ -126,52 +120,35 @@ my %pgdump_runs = (
"$tempdir/defaults_tar_format.tar", ], },
pg_dumpall_globals => {
dump_cmd => [
'pg_dumpall',
'--no-sync',
"--file=$tempdir/pg_dumpall_globals.sql",
'-g', ],
},
'pg_dumpall', '--no-sync',
"--file=$tempdir/pg_dumpall_globals.sql", '-g', ], },
no_privs => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/no_privs.sql",
'-x',
'pg_dump', '--no-sync',
"--file=$tempdir/no_privs.sql", '-x',
'postgres', ], },
no_owner => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/no_owner.sql",
'-O',
'pg_dump', '--no-sync',
"--file=$tempdir/no_owner.sql", '-O',
'postgres', ], },
schema_only => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/schema_only.sql",
'-s',
'postgres', ],
},
'pg_dump', '--no-sync', "--file=$tempdir/schema_only.sql",
'-s', 'postgres', ], },
section_pre_data => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/section_pre_data.sql",
'--section=pre-data',
'pg_dump', '--no-sync',
"--file=$tempdir/section_pre_data.sql", '--section=pre-data',
'postgres', ], },
section_data => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/section_data.sql",
'--section=data',
'pg_dump', '--no-sync',
"--file=$tempdir/section_data.sql", '--section=data',
'postgres', ], },
section_post_data => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/section_post_data.sql",
'pg_dump', '--no-sync', "--file=$tempdir/section_post_data.sql",
'--section=post-data', 'postgres', ], },);
###############################################################
@ -492,9 +469,8 @@ my %tests = (
pg_dumpall_globals => 1,
section_post_data => 1, }, },
'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role'
=> {
create_order => 4,
'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' =>
{ create_order => 4,
create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table
TO regress_dump_test_role;',
regexp => qr/^


@ -729,7 +729,7 @@ sub restart
my $name = $self->name;
print "### Restarting node \"$name\"\n";
TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
'restart');
'restart');
$self->_update_pid(1);
}
@ -750,7 +750,7 @@ sub promote
my $name = $self->name;
print "### Promoting node \"$name\"\n";
TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
'promote');
'promote');
}
# Internal routine to enable streaming replication on a standby node.
@ -846,6 +846,7 @@ sub _update_pid
$self->{_pid} = undef;
print "# No postmaster PID for node \"$name\"\n";
# Complain if we expected to find a pidfile.
BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running;
}
@ -1140,10 +1141,12 @@ sub psql
my $exc_save = $@;
if ($exc_save)
{
# IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired
die $exc_save
if (blessed($exc_save) || $exc_save !~ /^\Q$timeout_exception\E/);
if (blessed($exc_save)
|| $exc_save !~ /^\Q$timeout_exception\E/);
$ret = undef;
@ -1191,7 +1194,8 @@ sub psql
if $ret == 1;
die "connection error: '$$stderr'\nwhile running '@psql_params'"
if $ret == 2;
die "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
die
"error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
if $ret == 3;
die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
}
@ -1362,15 +1366,17 @@ mode must be specified.
sub lsn
{
my ($self, $mode) = @_;
my %modes = ('insert' => 'pg_current_wal_insert_lsn()',
'flush' => 'pg_current_wal_flush_lsn()',
'write' => 'pg_current_wal_lsn()',
'receive' => 'pg_last_wal_receive_lsn()',
'replay' => 'pg_last_wal_replay_lsn()');
my %modes = (
'insert' => 'pg_current_wal_insert_lsn()',
'flush' => 'pg_current_wal_flush_lsn()',
'write' => 'pg_current_wal_lsn()',
'receive' => 'pg_last_wal_receive_lsn()',
'replay' => 'pg_last_wal_replay_lsn()');
$mode = '<undef>' if !defined($mode);
die "unknown mode for 'lsn': '$mode', valid modes are " . join(', ', keys %modes)
if !defined($modes{$mode});
die "unknown mode for 'lsn': '$mode', valid modes are "
. join(', ', keys %modes)
if !defined($modes{$mode});
my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}");
chomp($result);
@ -1409,18 +1415,29 @@ sub wait_for_catchup
{
my ($self, $standby_name, $mode, $target_lsn) = @_;
$mode = defined($mode) ? $mode : 'replay';
my %valid_modes = ( 'sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1 );
die "unknown mode $mode for 'wait_for_catchup', valid modes are " . join(', ', keys(%valid_modes)) unless exists($valid_modes{$mode});
my %valid_modes =
('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
die "unknown mode $mode for 'wait_for_catchup', valid modes are "
. join(', ', keys(%valid_modes))
unless exists($valid_modes{$mode});
# Allow passing of a PostgresNode instance as shorthand
if ( blessed( $standby_name ) && $standby_name->isa("PostgresNode") )
if (blessed($standby_name) && $standby_name->isa("PostgresNode"))
{
$standby_name = $standby_name->name;
}
die 'target_lsn must be specified' unless defined($target_lsn);
print "Waiting for replication conn " . $standby_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n";
my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
print "Waiting for replication conn "
. $standby_name . "'s "
. $mode
. "_lsn to pass "
. $target_lsn . " on "
. $self->name . "\n";
my $query =
qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
$self->poll_query_until('postgres', $query)
or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)');
or die "timed out waiting for catchup, current location is "
. ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n";
}
@ -1453,10 +1470,17 @@ sub wait_for_slot_catchup
die "valid modes are restart, confirmed_flush";
}
die 'target lsn must be specified' unless defined($target_lsn);
print "Waiting for replication slot " . $slot_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n";
my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
print "Waiting for replication slot "
. $slot_name . "'s "
. $mode
. "_lsn to pass "
. $target_lsn . " on "
. $self->name . "\n";
my $query =
qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
$self->poll_query_until('postgres', $query)
or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)');
or die "timed out waiting for catchup, current location is "
. ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n";
}
@ -1485,18 +1509,23 @@ null columns.
sub query_hash
{
my ($self, $dbname, $query, @columns) = @_;
die 'calls in array context for multi-row results not supported yet' if (wantarray);
die 'calls in array context for multi-row results not supported yet'
if (wantarray);
# Replace __COLUMNS__ if found
substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) = join(', ', @columns)
if index($query, '__COLUMNS__') >= 0;
substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) =
join(', ', @columns)
if index($query, '__COLUMNS__') >= 0;
my $result = $self->safe_psql($dbname, $query);
# hash slice, see http://stackoverflow.com/a/16755894/398670 .
#
# Fills the hash with empty strings produced by x-operator element
# duplication if result is an empty row
#
my %val;
@val{@columns} = $result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns);
@val{@columns} =
$result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns);
return \%val;
}
@ -1518,8 +1547,14 @@ either.
sub slot
{
my ($self, $slot_name) = @_;
my @columns = ('plugin', 'slot_type', 'datoid', 'database', 'active', 'active_pid', 'xmin', 'catalog_xmin', 'restart_lsn');
return $self->query_hash('postgres', "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'", @columns);
my @columns = (
'plugin', 'slot_type', 'datoid', 'database',
'active', 'active_pid', 'xmin', 'catalog_xmin',
'restart_lsn');
return $self->query_hash(
'postgres',
"SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'",
@columns);
}
=pod
@ -1543,29 +1578,36 @@ to check for timeout. retval is undef on timeout.
sub pg_recvlogical_upto
{
my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) = @_;
my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) =
@_;
my ($stdout, $stderr);
my $timeout_exception = 'pg_recvlogical timed out';
die 'slot name must be specified' unless defined($slot_name);
die 'endpos must be specified' unless defined($endpos);
die 'endpos must be specified' unless defined($endpos);
my @cmd = ('pg_recvlogical', '-S', $slot_name, '--dbname', $self->connstr($dbname));
my @cmd = (
'pg_recvlogical', '-S', $slot_name, '--dbname',
$self->connstr($dbname));
push @cmd, '--endpos', $endpos;
push @cmd, '-f', '-', '--no-loop', '--start';
while (my ($k, $v) = each %plugin_options)
{
die "= is not permitted to appear in replication option name" if ($k =~ qr/=/);
die "= is not permitted to appear in replication option name"
if ($k =~ qr/=/);
push @cmd, "-o", "$k=$v";
}
my $timeout;
$timeout = IPC::Run::timeout($timeout_secs, exception => $timeout_exception ) if $timeout_secs;
$timeout =
IPC::Run::timeout($timeout_secs, exception => $timeout_exception)
if $timeout_secs;
my $ret = 0;
do {
do
{
local $@;
eval {
IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
@ -1574,6 +1616,7 @@ sub pg_recvlogical_upto
my $exc_save = $@;
if ($exc_save)
{
# IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired
die $exc_save
@ -1584,8 +1627,9 @@ sub pg_recvlogical_upto
die "Got timeout exception '$exc_save' but timer not expired?!"
unless $timeout->is_expired;
die "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
unless wantarray;
die
"$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
unless wantarray;
}
};
@ -1598,7 +1642,9 @@ sub pg_recvlogical_upto
}
else
{
die "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'" if $ret;
die
"pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
if $ret;
return $stdout;
}
}
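
Because the bodies of lsn() and wait_for_catchup() were only reshuffled above, a compact call-site sketch may be easier to read them against: lsn() maps a mode name onto one of the pg_*_lsn() functions through its %modes table, and wait_for_catchup() polls pg_stat_replication until the standby's chosen *_lsn passes the target, accepting a PostgresNode object as shorthand for its name. The sketch uses only methods shown in this file; node and table names are invented:

use strict;
use warnings;
use PostgresNode;
use Test::More tests => 1;

# Build a primary/standby pair with the module's own helpers.
my $master = get_new_node('master_sketch');
$master->init(allows_streaming => 1);
$master->start;
$master->backup('sketch_backup');

my $standby = get_new_node('standby_sketch');
$standby->init_from_backup($master, 'sketch_backup', has_streaming => 1);
$standby->start;

# Write something, then wait until the standby's replay position has passed
# the primary's current insert LSN.  Passing the PostgresNode object works
# because wait_for_catchup() accepts it as shorthand for its name.
$master->safe_psql('postgres', 'CREATE TABLE sketch_tbl AS SELECT 1 AS x');
$master->wait_for_catchup($standby, 'replay', $master->lsn('insert'));

is($standby->safe_psql('postgres', 'SELECT count(*) FROM sketch_tbl'),
	'1', 'standby replayed the table creation');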


@ -17,6 +17,7 @@ use File::Spec;
use File::Temp ();
use IPC::Run;
use SimpleTee;
# specify a recent enough version of Test::More to support the note() function
use Test::More 0.82;
@ -91,8 +92,8 @@ INIT
# Hijack STDOUT and STDERR to the log file
open(my $orig_stdout, '>&', \*STDOUT);
open(my $orig_stderr, '>&', \*STDERR);
open(STDOUT, '>&', $testlog);
open(STDERR, '>&', $testlog);
open(STDOUT, '>&', $testlog);
open(STDERR, '>&', $testlog);
# The test output (ok ...) needs to be printed to the original STDOUT so
# that the 'prove' program can parse it, and display it to the user in


@ -40,8 +40,10 @@ $node_master->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a");
# Wait for standbys to catch up
$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay'));
$node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('replay'));
my $result =
$node_standby_1->safe_psql('postgres', "SELECT count(*) FROM tab_int");
@ -66,11 +68,11 @@ note "testing connection parameter \"target_session_attrs\"";
# target_session_attrs with multiple nodes.
sub test_target_session_attrs
{
my $node1 = shift;
my $node2 = shift;
my $node1 = shift;
my $node2 = shift;
my $target_node = shift;
my $mode = shift;
my $status = shift;
my $mode = shift;
my $status = shift;
my $node1_host = $node1->host;
my $node1_port = $node1->port;
@ -89,25 +91,32 @@ sub test_target_session_attrs
# The client used for the connection does not matter, only the backend
# point does.
my ($ret, $stdout, $stderr) =
$node1->psql('postgres', 'SHOW port;', extra_params => ['-d', $connstr]);
is($status == $ret && $stdout eq $target_node->port, 1,
"connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed");
$node1->psql('postgres', 'SHOW port;',
extra_params => [ '-d', $connstr ]);
is( $status == $ret && $stdout eq $target_node->port,
1,
"connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed"
);
}
# Connect to master in "read-write" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master,
"read-write", 0);
"read-write", 0);
# Connect to master in "read-write" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_master,
"read-write", 0);
"read-write", 0);
# Connect to master in "any" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master,
"any", 0);
test_target_session_attrs($node_master, $node_standby_1, $node_master, "any",
0);
# Connect to standby1 in "any" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_standby_1,
"any", 0);
"any", 0);
note "switching to physical replication slot";
# Switch to using a physical replication slot. We can do this without a new
# backup since physical slots can go backwards if needed. Do so on both
# standbys. Since we're going to be testing things that affect the slot state,
@ -115,14 +124,26 @@ note "switching to physical replication slot";
my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2');
$node_master->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_master->restart;
is($node_master->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_1');]), 0, 'physical slot created on master');
$node_standby_1->append_conf('recovery.conf', "primary_slot_name = $slotname_1");
$node_standby_1->append_conf('postgresql.conf', "wal_receiver_status_interval = 1");
is( $node_master->psql(
'postgres',
qq[SELECT pg_create_physical_replication_slot('$slotname_1');]),
0,
'physical slot created on master');
$node_standby_1->append_conf('recovery.conf',
"primary_slot_name = $slotname_1");
$node_standby_1->append_conf('postgresql.conf',
"wal_receiver_status_interval = 1");
$node_standby_1->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_standby_1->restart;
is($node_standby_1->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_2');]), 0, 'physical slot created on intermediate replica');
$node_standby_2->append_conf('recovery.conf', "primary_slot_name = $slotname_2");
$node_standby_2->append_conf('postgresql.conf', "wal_receiver_status_interval = 1");
is( $node_standby_1->psql(
'postgres',
qq[SELECT pg_create_physical_replication_slot('$slotname_2');]),
0,
'physical slot created on intermediate replica');
$node_standby_2->append_conf('recovery.conf',
"primary_slot_name = $slotname_2");
$node_standby_2->append_conf('postgresql.conf',
"wal_receiver_status_interval = 1");
$node_standby_2->restart;
sub get_slot_xmins
@ -135,11 +156,11 @@ sub get_slot_xmins
# There's no hot standby feedback and there are no logical slots on either peer
# so xmin and catalog_xmin should be null on both slots.
my ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1);
is($xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
is($xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
is($catalog_xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
is($xmin, '', 'cascaded slot xmin null with no hs_feedback');
is($xmin, '', 'cascaded slot xmin null with no hs_feedback');
is($catalog_xmin, '', 'cascaded slot xmin null with no hs_feedback');
# Replication still works?
@ -147,23 +168,32 @@ $node_master->safe_psql('postgres', 'CREATE TABLE replayed(val integer);');
sub replay_check
{
my $newval = $node_master->safe_psql('postgres', 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val');
$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay'));
$node_standby_1->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_1 didn't replay master value $newval";
$node_standby_2->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_2 didn't replay standby_1 value $newval";
my $newval = $node_master->safe_psql('postgres',
'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
);
$node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('insert'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('replay'));
$node_standby_1->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_1 didn't replay master value $newval";
$node_standby_2->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
or die "standby_2 didn't replay standby_1 value $newval";
}
replay_check();
note "enabling hot_standby_feedback";
# Enable hs_feedback. The slot should gain an xmin. We set the status interval
# so we'll see the results promptly.
$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_1->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_1->reload;
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload;
replay_check();
sleep(2);
@ -177,7 +207,8 @@ isnt($xmin, '', 'cascaded slot xmin non-null with hs feedback');
is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback');
note "doing some work to advance xmin";
for my $i (10000..11000) {
for my $i (10000 .. 11000)
{
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES ($i);]);
}
$node_master->safe_psql('postgres', 'VACUUM;');
@ -186,38 +217,46 @@ $node_master->safe_psql('postgres', 'CHECKPOINT;');
my ($xmin2, $catalog_xmin2) = get_slot_xmins($node_master, $slotname_1);
note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'non-cascaded slot xmin with hs feedback has changed');
is($catalog_xmin2, '', 'non-cascaded slot xmin still null with hs_feedback unchanged');
is($catalog_xmin2, '',
'non-cascaded slot xmin still null with hs_feedback unchanged');
($xmin2, $catalog_xmin2) = get_slot_xmins($node_standby_1, $slotname_2);
note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'cascaded slot xmin with hs feedback has changed');
is($catalog_xmin2, '', 'cascaded slot xmin still null with hs_feedback unchanged');
is($catalog_xmin2, '',
'cascaded slot xmin still null with hs_feedback unchanged');
note "disabling hot_standby_feedback";
# Disable hs_feedback. Xmin should be cleared.
$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_1->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_1->reload;
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->reload;
replay_check();
sleep(2);
($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1);
is($xmin, '', 'non-cascaded slot xmin null with hs feedback reset');
is($catalog_xmin, '', 'non-cascaded slot xmin still null with hs_feedback reset');
is($catalog_xmin, '',
'non-cascaded slot xmin still null with hs_feedback reset');
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
is($xmin, '', 'cascaded slot xmin null with hs feedback reset');
is($xmin, '', 'cascaded slot xmin null with hs feedback reset');
is($catalog_xmin, '', 'cascaded slot xmin still null with hs_feedback reset');
note "re-enabling hot_standby_feedback and disabling while stopped";
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload;
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES (11000);]);
replay_check();
$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->safe_psql('postgres',
'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->stop;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
@ -227,4 +266,5 @@ isnt($xmin, '', 'cascaded slot xmin non-null with postgres shut down');
$node_standby_2->start;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
is($xmin, '', 'cascaded slot xmin reset after startup with hs feedback reset');
is($xmin, '',
'cascaded slot xmin reset after startup with hs feedback reset');
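
get_slot_xmins() is cut off by the hunk boundary just after its opening line; judging from the slot() accessor reformatted in PostgresNode.pm above, it likely only pulls two columns out of the returned hash. The reconstruction below is a guess offered for readability, wrapped in a small standalone script with invented node and slot names:

use strict;
use warnings;
use PostgresNode;
use Test::More tests => 1;

# Hedged reconstruction: slot() returns a hash keyed by column name, so the
# helper presumably just extracts xmin and catalog_xmin from it.
sub get_slot_xmins
{
	my ($node, $slotname) = @_;
	my $slot = $node->slot($slotname);
	return ($slot->{'xmin'}, $slot->{'catalog_xmin'});
}

my $node = get_new_node('xmin_sketch');
$node->init(allows_streaming => 1);
$node->start;
$node->safe_psql('postgres',
	"SELECT pg_create_physical_replication_slot('sketch_slot')");

my $slot = $node->slot('sketch_slot');
is($slot->{'slot_type'}, 'physical', 'slot is visible through slot()');

# With no standby attached both xmin columns are still unset (NULL shows up
# as an empty or undefined value here).
my ($xmin, $catalog_xmin) = get_slot_xmins($node, 'sketch_slot');
note "xmin='" . (defined($xmin) ? $xmin : '') . "', catalog_xmin='"
  . (defined($catalog_xmin) ? $catalog_xmin : '') . "'";

$node->stop;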


@ -22,8 +22,7 @@ sub test_recovery_standby
foreach my $param_item (@$recovery_params)
{
$node_standby->append_conf(
'recovery.conf', qq($param_item));
$node_standby->append_conf('recovery.conf', qq($param_item));
}
$node_standby->start;
@ -71,8 +70,8 @@ my ($lsn2, $recovery_txid) = split /\|/, $ret;
# More data, with recovery target timestamp
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(2001,3000))");
$ret = $node_master->safe_psql('postgres',
"SELECT pg_current_wal_lsn(), now();");
$ret =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn(), now();");
my ($lsn3, $recovery_time) = split /\|/, $ret;
# Even more data, this time with a recovery target name
@ -87,7 +86,8 @@ $node_master->safe_psql('postgres',
# And now for a recovery target LSN
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(4001,5000))");
my $recovery_lsn = $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $recovery_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $lsn5 =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");

View File

@ -34,7 +34,8 @@ $node_master->safe_psql('postgres',
"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a");
# Wait until standby has replayed enough data on standby 1
$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('write'));
$node_master->wait_for_catchup($node_standby_1, 'replay',
$node_master->lsn('write'));
# Stop and remove master, and promote standby 1, switching it to a new timeline
$node_master->teardown_node;
@ -55,7 +56,8 @@ $node_standby_2->restart;
# to ensure that the timeline switch has been done.
$node_standby_1->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(1001,2000))");
$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('write'));
$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
$node_standby_1->lsn('write'));
my $result =
$node_standby_2->safe_psql('postgres', "SELECT count(*) FROM tab_int");

View File

@ -50,8 +50,7 @@ while ($remaining-- > 0)
# Done waiting?
my $replay_status = $node_standby->safe_psql('postgres',
"SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0"
);
"SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0");
last if $replay_status eq 't';
# No, sleep some more.

View File

@ -14,21 +14,27 @@ use Config;
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->append_conf(
'postgresql.conf', qq(
'postgresql.conf', qq(
wal_level = logical
));
$node_master->start;
my $backup_name = 'master_backup';
$node_master->safe_psql('postgres', qq[CREATE TABLE decoding_test(x integer, y text);]);
$node_master->safe_psql('postgres',
qq[CREATE TABLE decoding_test(x integer, y text);]);
$node_master->safe_psql('postgres', qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]);
$node_master->safe_psql('postgres',
qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]);
$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]);
$node_master->safe_psql('postgres',
qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]
);
# Basic decoding works
my($result) = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
my ($result) = $node_master->safe_psql('postgres',
qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
is(scalar(my @foobar = split /^/m, $result),
12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
# If we immediately crash the server we might lose the progress we just made
# and replay the same changes again. But a clean shutdown should never repeat
@ -36,13 +42,16 @@ is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc
$node_master->restart('fast');
# There are no new writes, so the result should be empty.
$result = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
$result = $node_master->safe_psql('postgres',
qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
chomp($result);
is($result, '', 'Decoding after fast restart repeats no rows');
# Insert some rows and verify that we get the same results from pg_recvlogical
# and the SQL interface.
$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]);
$node_master->safe_psql('postgres',
qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]
);
my $expected = q{BEGIN
table public.decoding_test: INSERT: x[integer]:1 y[text]:'1'
@ -51,59 +60,91 @@ table public.decoding_test: INSERT: x[integer]:3 y[text]:'3'
table public.decoding_test: INSERT: x[integer]:4 y[text]:'4'
COMMIT};
my $stdout_sql = $node_master->safe_psql('postgres', qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]);
my $stdout_sql = $node_master->safe_psql('postgres',
qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
);
is($stdout_sql, $expected, 'got expected output from SQL decoding session');
my $endpos = $node_master->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;");
my $endpos = $node_master->safe_psql('postgres',
"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
);
print "waiting to replay $endpos\n";
my $stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1');
my $stdout_recv = $node_master->pg_recvlogical_upto(
'postgres', 'test_slot', $endpos, 10,
'include-xids' => '0',
'skip-empty-xacts' => '1');
chomp($stdout_recv);
is($stdout_recv, $expected, 'got same expected output from pg_recvlogical decoding session');
is($stdout_recv, $expected,
'got same expected output from pg_recvlogical decoding session');
$stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1');
$stdout_recv = $node_master->pg_recvlogical_upto(
'postgres', 'test_slot', $endpos, 10,
'include-xids' => '0',
'skip-empty-xacts' => '1');
chomp($stdout_recv);
is($stdout_recv, '', 'pg_recvlogical acknowledged changes, nothing pending on slot');
is($stdout_recv, '',
'pg_recvlogical acknowledged changes, nothing pending on slot');
$node_master->safe_psql('postgres', 'CREATE DATABASE otherdb');
is($node_master->psql('otherdb', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"), 3,
is( $node_master->psql(
'otherdb',
"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
),
3,
'replaying logical slot from another database fails');
$node_master->safe_psql('otherdb', qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]);
$node_master->safe_psql('otherdb',
qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]
);
# make sure you can't drop a slot while active
SKIP:
{
# some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
# some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
skip "Test fails on Windows perl", 2 if $Config{osname} eq 'MSWin32';
my $pg_recvlogical = IPC::Run::start(['pg_recvlogical', '-d', $node_master->connstr('otherdb'), '-S', 'otherdb_slot', '-f', '-', '--start']);
$node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)");
is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 3,
'dropping a DB with inactive logical slots fails');
my $pg_recvlogical = IPC::Run::start(
[ 'pg_recvlogical', '-d', $node_master->connstr('otherdb'),
'-S', 'otherdb_slot', '-f', '-', '--start' ]);
$node_master->poll_query_until('otherdb',
"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"
);
is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
3, 'dropping a DB with inactive logical slots fails');
$pg_recvlogical->kill_kill;
is($node_master->slot('otherdb_slot')->{'slot_name'}, undef,
'logical slot still exists');
is($node_master->slot('otherdb_slot')->{'slot_name'},
undef, 'logical slot still exists');
}
$node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)");
is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 0,
'dropping a DB with inactive logical slots succeeds');
is($node_master->slot('otherdb_slot')->{'slot_name'}, undef,
'logical slot was actually dropped with DB');
$node_master->poll_query_until('otherdb',
"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)"
);
is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
0, 'dropping a DB with inactive logical slots succeeds');
is($node_master->slot('otherdb_slot')->{'slot_name'},
undef, 'logical slot was actually dropped with DB');
# Restarting a node with wal_level = logical that has existing
# slots must succeed, but decoding from those slots must fail.
$node_master->safe_psql('postgres', 'ALTER SYSTEM SET wal_level = replica');
is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'logical', 'wal_level is still logical before restart');
is($node_master->safe_psql('postgres', 'SHOW wal_level'),
'logical', 'wal_level is still logical before restart');
$node_master->restart;
is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'replica', 'wal_level is replica');
isnt($node_master->slot('test_slot')->{'catalog_xmin'}, '0',
'restored slot catalog_xmin is nonzero');
is($node_master->psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]), 3,
is($node_master->safe_psql('postgres', 'SHOW wal_level'),
'replica', 'wal_level is replica');
isnt($node_master->slot('test_slot')->{'catalog_xmin'},
'0', 'restored slot catalog_xmin is nonzero');
is( $node_master->psql(
'postgres',
qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]),
3,
'reading from slot with wal_level < logical fails');
is($node_master->psql('postgres', q[SELECT pg_drop_replication_slot('test_slot')]), 0,
is( $node_master->psql(
'postgres', q[SELECT pg_drop_replication_slot('test_slot')]),
0,
'can drop logical slot while wal_level = replica');
is($node_master->slot('test_slot')->{'catalog_xmin'}, '', 'slot was dropped');


@ -176,20 +176,20 @@ standby4|1|potential),
# Check that standby1 and standby2 are chosen as sync standbys
# based on their priorities.
test_sync_state(
$node_master, qq(standby1|1|sync
$node_master, qq(standby1|1|sync
standby2|2|sync
standby4|0|async),
'priority-based sync replication specified by FIRST keyword',
'FIRST 2(standby1, standby2)');
'priority-based sync replication specified by FIRST keyword',
'FIRST 2(standby1, standby2)');
# Check that all the listed standbys are considered as candidates
# for sync standbys in a quorum-based sync replication.
test_sync_state(
$node_master, qq(standby1|1|quorum
$node_master, qq(standby1|1|quorum
standby2|1|quorum
standby4|0|async),
'2 quorum and 1 async',
'ANY 2(standby1, standby2)');
'2 quorum and 1 async',
'ANY 2(standby1, standby2)');
# Start Standby3 which will be considered in 'quorum' state.
$node_standby_3->start;
@ -197,9 +197,9 @@ $node_standby_3->start;
# Check that the setting of 'ANY 2(*)' chooses all standbys as
# candidates for quorum sync standbys.
test_sync_state(
$node_master, qq(standby1|1|quorum
$node_master, qq(standby1|1|quorum
standby2|1|quorum
standby3|1|quorum
standby4|1|quorum),
'all standbys are considered as candidates for quorum sync standbys',
'ANY 2(*)');
'all standbys are considered as candidates for quorum sync standbys',
'ANY 2(*)');
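
As the comments in this file describe, the FIRST form of synchronous_standby_names picks sync standbys by priority while the ANY form treats every listed standby as a quorum candidate. A minimal sketch that merely round-trips both spellings through a standby-less node (so nothing ever waits on synchronous commit) follows; the node name is invented, and append_conf()/reload() are the same helpers these tests use:

use strict;
use warnings;
use PostgresNode;
use Test::More tests => 2;

my $node = get_new_node('syncrep_sketch');
$node->init(allows_streaming => 1);
$node->start;

# No standby ever connects, so nothing here waits on synchronous commit;
# this only shows that both spellings are accepted and round-trip via SHOW.
foreach my $spec ('FIRST 2(standby1, standby2)', 'ANY 2(*)')
{
	$node->append_conf('postgresql.conf',
		"synchronous_standby_names = '$spec'");
	$node->reload;
	is($node->safe_psql('postgres', 'SHOW synchronous_standby_names'),
		$spec, "synchronous_standby_names accepts \"$spec\"");
}

$node->stop;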


@ -12,7 +12,8 @@ use Test::More tests => 1;
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->append_conf('postgresql.conf', qq{
$node_master->append_conf(
'postgresql.conf', qq{
fsync = on
wal_log_hints = on
max_prepared_transactions = 5
@ -29,7 +30,8 @@ $node_standby->init_from_backup($node_master, 'master_backup',
has_streaming => 1);
$node_standby->start;
$node_master->psql('postgres', qq{
$node_master->psql(
'postgres', qq{
create table testtab (a int, b char(100));
insert into testtab select generate_series(1,1000), 'foo';
insert into testtab select generate_series(1,1000), 'foo';
@ -37,7 +39,8 @@ delete from testtab where ctid > '(8,0)';
});
# Take a lock on the table to prevent following vacuum from truncating it
$node_master->psql('postgres', qq{
$node_master->psql(
'postgres', qq{
begin;
lock table testtab in row share mode;
prepare transaction 'p1';
@ -51,7 +54,8 @@ $node_master->psql('postgres', 'checkpoint');
# Now do some more insert/deletes, another vacuum to ensure full-page writes
# are done
$node_master->psql('postgres', qq{
$node_master->psql(
'postgres', qq{
insert into testtab select generate_series(1,1000), 'foo';
delete from testtab where ctid > '(8,0)';
vacuum verbose testtab;
@ -61,25 +65,25 @@ vacuum verbose testtab;
$node_standby->psql('postgres', 'checkpoint');
# Release the lock, vacuum again which should lead to truncation
$node_master->psql('postgres', qq{
$node_master->psql(
'postgres', qq{
rollback prepared 'p1';
vacuum verbose testtab;
});
$node_master->psql('postgres', 'checkpoint');
my $until_lsn =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
# Wait long enough for standby to receive and apply all WAL
my $caughtup_query =
"SELECT '$until_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
"SELECT '$until_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
$node_standby->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for standby to catch up";
or die "Timed out while waiting for standby to catch up";
# Promote the standby
$node_standby->promote;
$node_standby->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery()")
$node_standby->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_standby->psql('postgres', 'checkpoint');
@ -87,6 +91,8 @@ $node_standby->psql('postgres', 'checkpoint');
$node_standby->restart;
# Insert should work on standby
is($node_standby->psql('postgres',
qq{insert into testtab select generate_series(1,1000), 'foo';}),
0, 'INSERT succeeds with truncated relation FSM');
is( $node_standby->psql(
'postgres',
qq{insert into testtab select generate_series(1,1000), 'foo';}),
0,
'INSERT succeeds with truncated relation FSM');


@ -9,7 +9,8 @@ use Test::More tests => 12;
# Setup master node
my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1);
$node_master->append_conf('postgresql.conf', qq(
$node_master->append_conf(
'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
@ -19,17 +20,19 @@ $node_master->psql('postgres', "CREATE TABLE t_009_tbl (id int)");
# Setup slave node
my $node_slave = get_new_node('slave');
$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1);
$node_slave->init_from_backup($node_master, 'master_backup',
has_streaming => 1);
$node_slave->start;
# Switch to synchronous replication
$node_master->append_conf('postgresql.conf', qq(
$node_master->append_conf(
'postgresql.conf', qq(
synchronous_standby_names = '*'
));
$node_master->psql('postgres', "SELECT pg_reload_conf()");
my $psql_out = '';
my $psql_rc = '';
my $psql_rc = '';
###############################################################################
# Check that we can commit and abort transaction after soft restart.
@ -38,7 +41,8 @@ my $psql_rc = '';
# files.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@ -64,7 +68,8 @@ is($psql_rc, '0', 'Rollback prepared transaction after restart');
# transaction using dedicated WAL records.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
@ -89,7 +94,8 @@ is($psql_rc, '0', 'Rollback prepared transaction after teardown');
# Check that WAL replay can handle several transactions with same GID name.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
@ -113,7 +119,8 @@ is($psql_rc, '0', 'Replay several transactions with same GID');
# while replaying transaction commits.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@ -122,7 +129,8 @@ $node_master->psql('postgres', "
COMMIT PREPARED 'xact_009_1';");
$node_master->teardown_node;
$node_master->start;
$psql_rc = $node_master->psql('postgres', "
$psql_rc = $node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@ -138,24 +146,28 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# Check that WAL replay will cleanup its shared memory state on running slave.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (43);
PREPARE TRANSACTION 'xact_009_1';
COMMIT PREPARED 'xact_009_1';");
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '0',
"Cleanup of shared memory state on running standby without checkpoint");
"Cleanup of shared memory state on running standby without checkpoint");
###############################################################################
# Same as in previous case, but let's force checkpoint on slave between
# prepare and commit to use on-disk twophase files.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@ -163,16 +175,19 @@ $node_master->psql('postgres', "
PREPARE TRANSACTION 'xact_009_1';");
$node_slave->psql('postgres', "CHECKPOINT");
$node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '0',
"Cleanup of shared memory state on running standby after checkpoint");
"Cleanup of shared memory state on running standby after checkpoint");
###############################################################################
# Check that prepared transactions can be committed on promoted slave.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@ -180,8 +195,7 @@ $node_master->psql('postgres', "
PREPARE TRANSACTION 'xact_009_1';");
$node_master->teardown_node;
$node_slave->promote;
$node_slave->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery()")
$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$psql_rc = $node_slave->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
@ -190,7 +204,8 @@ is($psql_rc, '0', "Restore of prepared transaction on promoted slave");
# change roles
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq(
$node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
@ -202,7 +217,8 @@ $node_slave->start;
# consistent.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
@ -211,19 +227,21 @@ $node_master->psql('postgres', "
$node_master->stop;
$node_slave->restart;
$node_slave->promote;
$node_slave->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery()")
$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '1',
"Restore prepared transactions from files with master down");
"Restore prepared transactions from files with master down");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq(
$node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
@ -234,7 +252,8 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# restart while master is down.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (242);
SAVEPOINT s1;
@ -245,19 +264,21 @@ $node_master->stop;
$node_slave->teardown_node;
$node_slave->start;
$node_slave->promote;
$node_slave->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery()")
$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '1',
"Restore prepared transactions from records with master down");
"Restore prepared transactions from records with master down");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq(
$node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
@ -269,7 +290,8 @@ $node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# XLOG_STANDBY_LOCK wal record.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
CREATE TABLE t_009_tbl2 (id int);
SAVEPOINT s1;
@ -280,6 +302,8 @@ $node_master->psql('postgres', "
CHECKPOINT;
COMMIT PREPARED 'xact_009_1';");
$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '0', "Replay prepared transaction with DDL");

View File

@ -34,7 +34,8 @@ my ($stdout, $stderr, $ret);
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1, has_archiving => 1);
$node_master->append_conf('postgresql.conf', q[
$node_master->append_conf(
'postgresql.conf', q[
wal_level = 'logical'
max_replication_slots = 3
max_wal_senders = 2
@ -60,8 +61,7 @@ $node_master->safe_psql('postgres',
# the same physical copy trick, so:
$node_master->safe_psql('postgres', 'CREATE DATABASE dropme;');
$node_master->safe_psql('dropme',
"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');"
);
"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');");
$node_master->safe_psql('postgres', 'CHECKPOINT;');
@ -76,20 +76,23 @@ $node_replica->init_from_backup(
$node_master, $backup_name,
has_streaming => 1,
has_restoring => 1);
$node_replica->append_conf(
'recovery.conf', q[primary_slot_name = 'phys_slot']);
$node_replica->append_conf('recovery.conf',
q[primary_slot_name = 'phys_slot']);
$node_replica->start;
# If we drop 'dropme' on the master, the standby should drop the
# db and associated slot.
is($node_master->psql('postgres', 'DROP DATABASE dropme'), 0,
'dropped DB with logical slot OK on master');
$node_master->wait_for_catchup($node_replica, 'replay', $node_master->lsn('insert'));
is($node_replica->safe_psql('postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']), '',
is($node_master->psql('postgres', 'DROP DATABASE dropme'),
0, 'dropped DB with logical slot OK on master');
$node_master->wait_for_catchup($node_replica, 'replay',
$node_master->lsn('insert'));
is( $node_replica->safe_psql(
'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
'',
'dropped DB dropme on standby');
is($node_master->slot('dropme_slot')->{'slot_name'}, undef,
'logical slot was actually dropped on standby');
is($node_master->slot('dropme_slot')->{'slot_name'},
undef, 'logical slot was actually dropped on standby');
# Back to testing failover...
$node_master->safe_psql('postgres',
@ -109,19 +112,22 @@ is($stdout, 'before_basebackup',
# from the master to make sure its hot_standby_feedback
# has locked in a catalog_xmin on the physical slot, and that
# any xmin is < the catalog_xmin
$node_master->poll_query_until('postgres', q[
$node_master->poll_query_until(
'postgres', q[
SELECT catalog_xmin IS NOT NULL
FROM pg_replication_slots
WHERE slot_name = 'phys_slot'
]);
my $phys_slot = $node_master->slot('phys_slot');
isnt($phys_slot->{'xmin'}, '',
'xmin assigned on physical slot of master');
isnt($phys_slot->{'catalog_xmin'}, '',
'catalog_xmin assigned on physical slot of master');
isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of master');
isnt($phys_slot->{'catalog_xmin'},
'', 'catalog_xmin assigned on physical slot of master');
# Ignore wrap-around here, we're on a new cluster:
cmp_ok($phys_slot->{'xmin'}, '>=', $phys_slot->{'catalog_xmin'},
'xmin on physical slot must not be lower than catalog_xmin');
cmp_ok(
$phys_slot->{'xmin'}, '>=',
$phys_slot->{'catalog_xmin'},
'xmin on physical slot must not be lower than catalog_xmin');
$node_master->safe_psql('postgres', 'CHECKPOINT');
@ -162,23 +168,30 @@ COMMIT
BEGIN
table public.decoding: INSERT: blah[text]:'after failover'
COMMIT);
is($stdout, $final_expected_output_bb, 'decoded expected data from slot before_basebackup');
is($stdout, $final_expected_output_bb,
'decoded expected data from slot before_basebackup');
is($stderr, '', 'replay from slot before_basebackup produces no stderr');
# So far we've peeked the slots, so when we fetch the same info over
# pg_recvlogical we should get complete results. First, find out the commit lsn
# of the last transaction. There's no max(pg_lsn), so:
my $endpos = $node_replica->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;");
my $endpos = $node_replica->safe_psql('postgres',
"SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
);
# now use the walsender protocol to peek the slot changes and make sure we see
# the same results.
$stdout = $node_replica->pg_recvlogical_upto('postgres', 'before_basebackup',
$endpos, 30, 'include-xids' => '0', 'skip-empty-xacts' => '1');
$stdout = $node_replica->pg_recvlogical_upto(
'postgres', 'before_basebackup',
$endpos, 30,
'include-xids' => '0',
'skip-empty-xacts' => '1');
# walsender likes to add a newline
chomp($stdout);
is($stdout, $final_expected_output_bb, 'got same output from walsender via pg_recvlogical on before_basebackup');
is($stdout, $final_expected_output_bb,
'got same output from walsender via pg_recvlogical on before_basebackup');
$node_replica->teardown_node();

View File

@ -7,9 +7,10 @@ use PostgresNode;
use TestLib;
use Test::More;
use Config;
if ($Config{osname} eq 'MSWin32')
if ($Config{osname} eq 'MSWin32')
{
# some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
# some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
plan skip_all => "Test fails on Windows perl";
}
else
@ -28,8 +29,14 @@ my ($stdin, $stdout, $stderr) = ('', '', '');
# an xact to be in-progress when we crash and we need to know
# its xid.
my $tx = IPC::Run::start(
['psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', $node->connstr('postgres')],
'<', \$stdin, '>', \$stdout, '2>', \$stderr);
[ 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
$node->connstr('postgres') ],
'<',
\$stdin,
'>',
\$stdout,
'2>',
\$stderr);
$stdin .= q[
BEGIN;
CREATE TABLE mine(x integer);
@ -41,16 +48,19 @@ $tx->pump until $stdout =~ /[[:digit:]]+[\r\n]$/;
my $xid = $stdout;
chomp($xid);
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'in progress', 'own xid is in-progress');
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
'in progress', 'own xid is in-progress');
# Crash and restart the postmaster
$node->stop('immediate');
$node->start;
# Make sure we really got a new xid
cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'), '>', $xid,
'new xid after restart is greater');
cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'),
'>', $xid, 'new xid after restart is greater');
# and make sure we show the in-progress xact as aborted
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'aborted', 'xid is aborted after crash');
is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
'aborted', 'xid is aborted after crash');
$tx->kill_kill;
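Editorial aside: the rewrapped IPC::Run::start() call above is easier to follow next to a self-contained example of the same start/pump/kill_kill flow. The sketch below is not part of the patch; it assumes IPC::Run is installed and a Unix-like system where cat is available, and it drives cat instead of psql so no database is needed.

# Minimal sketch of the start/pump pattern the test above relies on.
use strict;
use warnings;
use IPC::Run qw(start);

my ($stdin, $stdout, $stderr) = ('', '', '');

# Start a child with its stdio tied to the three scalars, as the test does.
my $h = start([ 'cat' ], '<', \$stdin, '>', \$stdout, '2>', \$stderr);

$stdin .= "42\n";
$h->pump until $stdout =~ /42/;    # drive I/O until the child echoes the line
$h->kill_kill;                     # terminate the child, as the test does last
print "child echoed: $stdout";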

View File

@ -9,7 +9,8 @@ use Test::More tests => 12;
# Setup master node
my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1);
$node_master->append_conf('postgresql.conf', qq(
$node_master->append_conf(
'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
@ -19,24 +20,27 @@ $node_master->psql('postgres', "CREATE TABLE t_012_tbl (id int)");
# Setup slave node
my $node_slave = get_new_node('slave');
$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1);
$node_slave->init_from_backup($node_master, 'master_backup',
has_streaming => 1);
$node_slave->start;
# Switch to synchronous replication
$node_master->append_conf('postgresql.conf', qq(
$node_master->append_conf(
'postgresql.conf', qq(
synchronous_standby_names = '*'
));
$node_master->psql('postgres', "SELECT pg_reload_conf()");
my $psql_out = '';
my $psql_rc = '';
my $psql_rc = '';
###############################################################################
# Check that replay will correctly set SUBTRANS and properly advance nextXid
# so that it won't conflict with savepoint xids.
###############################################################################
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
DELETE FROM t_012_tbl;
INSERT INTO t_012_tbl VALUES (43);
@ -55,7 +59,8 @@ $node_master->psql('postgres', "
$node_master->stop;
$node_master->start;
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
-- here we can get xid of previous savepoint if nextXid
-- wasn't properly advanced
BEGIN;
@ -63,8 +68,10 @@ $node_master->psql('postgres', "
ROLLBACK;
COMMIT PREPARED 'xact_012_1';");
$node_master->psql('postgres', "SELECT count(*) FROM t_012_tbl",
stdout => \$psql_out);
$node_master->psql(
'postgres',
"SELECT count(*) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
###############################################################################
@ -75,7 +82,8 @@ is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
LANGUAGE plpgsql
@ -87,39 +95,48 @@ $node_master->psql('postgres', "
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(127);
COMMIT;");
$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_master->wait_for_catchup($node_slave, 'replay',
$node_master->lsn('insert'));
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->stop;
$node_slave->promote;
$node_slave->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery()")
$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq(
$node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
LANGUAGE plpgsql
@ -131,67 +148,87 @@ $node_master->psql('postgres', "
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(127);
PREPARE TRANSACTION 'xact_012_1';");
$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_master->wait_for_catchup($node_slave, 'replay',
$node_master->lsn('insert'));
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_slave->promote;
$node_slave->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery()")
$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq(
$node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
$psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
is($psql_rc, '0', "Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave");
is($psql_rc, '0',
"Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
);
$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_master->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
$node_master->psql('postgres', "
$node_master->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(201);
PREPARE TRANSACTION 'xact_012_1';");
$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_master->wait_for_catchup($node_slave, 'replay',
$node_master->lsn('insert'));
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_slave->promote;
$node_slave->poll_query_until('postgres',
"SELECT NOT pg_is_in_recovery()")
$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_slave->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
$node_slave->append_conf('recovery.conf', qq(
$node_slave->append_conf(
'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
$psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
is($psql_rc, '0', "Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave");
is($psql_rc, '0',
"Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
);
$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
$node_master->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");

View File

@ -74,12 +74,12 @@ sub configure_test_server_for_ssl
open my $sslconf, '>', "$pgdata/sslconfig.conf";
close $sslconf;
# Copy all server certificates and keys, and client root cert, to the data dir
# Copy all server certificates and keys, and client root cert, to the data dir
copy_files("ssl/server-*.crt", $pgdata);
copy_files("ssl/server-*.key", $pgdata);
chmod(0600, glob "$pgdata/server-*.key") or die $!;
copy_files("ssl/root+client_ca.crt", $pgdata);
copy_files("ssl/root_ca.crt", $pgdata);
copy_files("ssl/root_ca.crt", $pgdata);
copy_files("ssl/root+client.crl", $pgdata);
# Stop and restart server to load new listen_addresses.
@ -95,10 +95,11 @@ sub switch_server_cert
{
my $node = $_[0];
my $certfile = $_[1];
my $cafile = $_[2] || "root+client_ca";
my $cafile = $_[2] || "root+client_ca";
my $pgdata = $node->data_dir;
note "reloading server with certfile \"$certfile\" and cafile \"$cafile\"";
note
"reloading server with certfile \"$certfile\" and cafile \"$cafile\"";
open my $sslconf, '>', "$pgdata/sslconfig.conf";
print $sslconf "ssl=on\n";
@ -117,10 +118,10 @@ sub configure_hba_for_ssl
my $serverhost = $_[1];
my $pgdata = $node->data_dir;
# Only accept SSL connections from localhost. Our tests don't depend on this
# but seems best to keep it as narrow as possible for security reasons.
#
# When connecting to certdb, also check the client certificate.
# Only accept SSL connections from localhost. Our tests don't depend on this
# but seems best to keep it as narrow as possible for security reasons.
#
# When connecting to certdb, also check the client certificate.
open my $hba, '>', "$pgdata/pg_hba.conf";
print $hba
"# TYPE DATABASE USER ADDRESS METHOD\n";

View File

@ -26,19 +26,15 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE tab_rep (a int primary key)");
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_notrep (a int)");
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_ins (a int)");
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_full (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_ins (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full (a int)");
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_rep (a int primary key)");
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub");
$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)");
$node_publisher->safe_psql('postgres',
@ -48,7 +44,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only");
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only"
);
# Wait for subscriber to finish initialization
my $caughtup_query =
@ -72,27 +69,23 @@ is($result, qq(1002), 'check initial data was copied to subscriber');
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1,50)");
$node_publisher->safe_psql('postgres',
"DELETE FROM tab_ins WHERE a > 20");
$node_publisher->safe_psql('postgres',
"UPDATE tab_ins SET a = -a");
$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 20");
$node_publisher->safe_psql('postgres', "UPDATE tab_ins SET a = -a");
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_rep SELECT generate_series(1,50)");
$node_publisher->safe_psql('postgres',
"DELETE FROM tab_rep WHERE a > 20");
$node_publisher->safe_psql('postgres',
"UPDATE tab_rep SET a = -a");
$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep WHERE a > 20");
$node_publisher->safe_psql('postgres', "UPDATE tab_rep SET a = -a");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1052|1|1002), 'check replicated inserts on subscriber');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_rep");
is($result, qq(20|-20|-1), 'check replicated changes on subscriber');
# insert some duplicate rows
@ -110,107 +103,114 @@ $node_subscriber->safe_psql('postgres',
"ALTER TABLE tab_ins REPLICA IDENTITY FULL");
# and do the update
$node_publisher->safe_psql('postgres',
"UPDATE tab_full SET a = a * a");
$node_publisher->safe_psql('postgres', "UPDATE tab_full SET a = a * a");
# Wait for subscription to catch up
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full");
is($result, qq(20|1|100), 'update works with REPLICA IDENTITY FULL and duplicate tuples');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_full");
is($result, qq(20|1|100),
'update works with REPLICA IDENTITY FULL and duplicate tuples');
# check that change of connection string and/or publication list causes
# restart of subscription workers. Not all of these are registered as tests
# as we need to poll for a change but the test suite will fail none the less
# when something goes wrong.
my $oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
);
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'");
"ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'"
);
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
or die "Timed out while waiting for apply to restart";
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
) or die "Timed out while waiting for apply to restart";
$oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
);
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)");
"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)"
);
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
or die "Timed out while waiting for apply to restart";
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
) or die "Timed out while waiting for apply to restart";
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1001,1100)");
$node_publisher->safe_psql('postgres',
"DELETE FROM tab_rep");
$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1152|1|1100), 'check replicated inserts after subscription publication change');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1152|1|1100),
'check replicated inserts after subscription publication change');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep");
is($result, qq(20|-20|-1), 'check changes skipped after subscription publication change');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_rep");
is($result, qq(20|-20|-1),
'check changes skipped after subscription publication change');
# check alter publication (relcache invalidation etc)
$node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only SET (publish = 'insert, delete')");
$node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_full");
$node_publisher->safe_psql('postgres',
"DELETE FROM tab_ins WHERE a > 0");
$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 0");
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)");
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_full VALUES(0)");
"ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)"
);
$node_publisher->safe_psql('postgres', "INSERT INTO tab_full VALUES(0)");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# note that data are different on provider and subscriber
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1052|1|1002), 'check replicated deletes after alter publication');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1052|1|1002),
'check replicated deletes after alter publication');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(a), max(a) FROM tab_full");
is($result, qq(21|0|100), 'check replicated insert after alter publication');
# check restart on rename
$oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
"SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
);
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
or die "Timed out while waiting for apply to restart";
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
) or die "Timed out while waiting for apply to restart";
# check all the cleanup
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber');
$result =
$node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
$result = $node_publisher->safe_psql('postgres',
"SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
is($result, qq(0), 'check subscription relation status was dropped on subscriber');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*) FROM pg_subscription_rel");
is($result, qq(0),
'check subscription relation status was dropped on subscriber');
$result =
$node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
$result = $node_publisher->safe_psql('postgres',
"SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast');
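Editorial aside on the restart checks in this file: each one records the walsender pid for the subscription's application_name, alters the subscription, then polls until a different pid appears. A hypothetical wrapper for that polling step is sketched below; it is not part of this commit and merely reuses the poll_query_until() call and query text already shown, assuming a PostgresNode-style $publisher object.

# Hypothetical helper (not part of this commit).  Waits until the walsender
# serving $appname reports a pid different from $oldpid, i.e. the apply
# worker reconnected after a subscription change.
use strict;
use warnings;

sub wait_for_apply_restart
{
	my ($publisher, $appname, $oldpid) = @_;
	$publisher->poll_query_until('postgres',
		"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
	) or die "Timed out while waiting for apply to restart";
	return;
}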

View File

@ -17,7 +17,7 @@ $node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->start;
# Create some preexisting content on publisher
my $ddl = qq(
my $ddl = qq(
CREATE EXTENSION hstore WITH SCHEMA public;
CREATE TABLE public.tst_one_array (
a INTEGER PRIMARY KEY,
@ -103,7 +103,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)");
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)"
);
# Wait for subscriber to finish initialization
my $caughtup_query =
@ -118,7 +119,8 @@ $node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Insert initial test data
$node_publisher->safe_psql('postgres', qq(
$node_publisher->safe_psql(
'postgres', qq(
-- test_tbl_one_array_col
INSERT INTO tst_one_array (a, b) VALUES
(1, '{1, 2, 3}'),
@ -248,7 +250,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
my $result = $node_subscriber->safe_psql('postgres', qq(
my $result = $node_subscriber->safe_psql(
'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@ -266,7 +269,7 @@ my $result = $node_subscriber->safe_psql('postgres', qq(
SELECT a, b FROM tst_hstore ORDER BY a;
));
is($result, '1|{1,2,3}
is( $result, '1|{1,2,3}
2|{2,3,1}
3|{3,2,1}
4|{4,3,2}
@ -331,10 +334,11 @@ e|{d,NULL}
2|"zzz"=>"foo"
3|"123"=>"321"
4|"yellow horse"=>"moaned"',
'check replicated inserts on subscriber');
'check replicated inserts on subscriber');
# Run batch of updates
$node_publisher->safe_psql('postgres', qq(
$node_publisher->safe_psql(
'postgres', qq(
UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1;
UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3;
UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}';
@ -368,7 +372,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
$result = $node_subscriber->safe_psql('postgres', qq(
$result = $node_subscriber->safe_psql(
'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@ -386,7 +391,7 @@ $result = $node_subscriber->safe_psql('postgres', qq(
SELECT a, b FROM tst_hstore ORDER BY a;
));
is($result, '1|{4,5,6}
is( $result, '1|{4,5,6}
2|{2,3,1}
3|{3,2,1}
4|{4,5,6,1}
@ -451,10 +456,11 @@ e|{e,d}
2|"updated"=>"value"
3|"also"=>"updated"
4|"yellow horse"=>"moaned"',
'check replicated updates on subscriber');
'check replicated updates on subscriber');
# Run batch of deletes
$node_publisher->safe_psql('postgres', qq(
$node_publisher->safe_psql(
'postgres', qq(
DELETE FROM tst_one_array WHERE a = 1;
DELETE FROM tst_one_array WHERE b = '{2, 3, 1}';
DELETE FROM tst_arrays WHERE a = '{1, 2, 3}';
@ -487,7 +493,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
$result = $node_subscriber->safe_psql('postgres', qq(
$result = $node_subscriber->safe_psql(
'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
@ -505,7 +512,7 @@ $result = $node_subscriber->safe_psql('postgres', qq(
SELECT a, b FROM tst_hstore ORDER BY a;
));
is($result, '3|{3,2,1}
is( $result, '3|{3,2,1}
4|{4,5,6,1}
5|{4,5,6,1}
{3,1,2}|{c,a,b}|{3.3,1.1,2.2}|{"3 years","1 year","2 years"}
@ -539,7 +546,7 @@ e|{e,d}
2|"updated"=>"value"
3|"also"=>"updated"
4|"yellow horse"=>"moaned"',
'check replicated deletes on subscriber');
'check replicated deletes on subscriber');
$node_subscriber->stop('fast');
$node_publisher->stop('fast');

View File

@ -19,13 +19,15 @@ $node_subscriber->start;
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));");
"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
);
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));");
"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@ -34,7 +36,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)");
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
);
# Wait for subscriber to finish initialization
my $caughtup_query =
@ -51,17 +54,16 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# Check data on subscriber
my $result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk;");
my $result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(bid), max(bid) FROM tab_fk;");
is($result, qq(1|1|1), 'check replicated tab_fk inserts on subscriber');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(1|1|1), 'check replicated tab_fk_ref inserts on subscriber');
# Drop the fk on publisher
$node_publisher->safe_psql('postgres',
"DROP TABLE tab_fk CASCADE;");
$node_publisher->safe_psql('postgres', "DROP TABLE tab_fk CASCADE;");
# Insert data
$node_publisher->safe_psql('postgres',
@ -71,12 +73,13 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# FK is not enforced on subscriber
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check FK ignored on subscriber');
# Add replica trigger
$node_subscriber->safe_psql('postgres', qq{
$node_subscriber->safe_psql(
'postgres', qq{
CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS \$\$
BEGIN
IF (TG_OP = 'INSERT') THEN
@ -105,8 +108,8 @@ $node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# The row should be skipped on subscriber
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check replica trigger applied on subscriber');
$node_subscriber->stop('fast');

View File

@ -13,7 +13,8 @@ $node_publisher->start;
# Create subscriber node
my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->append_conf('postgresql.conf', "wal_retrieve_retry_interval = 1ms");
$node_subscriber->append_conf('postgresql.conf',
"wal_retrieve_retry_interval = 1ms");
$node_subscriber->start;
# Create some preexisting content on publisher
@ -33,7 +34,8 @@ $node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
);
# Wait for subscriber to finish initialization
my $caughtup_query =
@ -59,17 +61,16 @@ $node_publisher->safe_psql('postgres',
# recreate the subscription, it will try to do initial copy
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
);
# but it will be stuck on data copy as it will fail on constraint
my $started_query =
"SELECT srsubstate = 'd' FROM pg_subscription_rel;";
my $started_query = "SELECT srsubstate = 'd' FROM pg_subscription_rel;";
$node_subscriber->poll_query_until('postgres', $started_query)
or die "Timed out while waiting for subscriber to start sync";
# remove the conflicting data
$node_subscriber->safe_psql('postgres',
"DELETE FROM tab_rep;");
$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
# wait for sync to finish this time
$node_subscriber->poll_query_until('postgres', $synced_query)
@ -82,28 +83,30 @@ is($result, qq(20), 'initial data synced for second sub');
# now check another subscription for the same node pair
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)");
"CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
);
# wait for it to start
$node_subscriber->poll_query_until('postgres', "SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL")
or die "Timed out while waiting for subscriber to start";
$node_subscriber->poll_query_until('postgres',
"SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL"
) or die "Timed out while waiting for subscriber to start";
# and drop both subscriptions
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub2");
# check subscriptions are removed
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'second and third sub are dropped');
# remove the conflicting data
$node_subscriber->safe_psql('postgres',
"DELETE FROM tab_rep;");
$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
# recreate the subscription again
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
);
# and wait for data sync to finish again
$node_subscriber->poll_query_until('postgres', $synced_query)
@ -115,8 +118,7 @@ $result =
is($result, qq(20), 'initial data synced for fourth sub');
# add new table on subscriber
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_rep_next (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_rep_next (a int)");
# setup structure with existing data on publisher
$node_publisher->safe_psql('postgres',
@ -126,8 +128,8 @@ $node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*) FROM tab_rep_next");
is($result, qq(0), 'no data for table added after subscription initialized');
# ask for data sync
@ -138,9 +140,10 @@ $node_subscriber->safe_psql('postgres',
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
is($result, qq(10), 'data for table added after subscription initialized are now synced');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*) FROM tab_rep_next");
is($result, qq(10),
'data for table added after subscription initialized are now synced');
# Add some data
$node_publisher->safe_psql('postgres',
@ -150,9 +153,10 @@ $node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
is($result, qq(20), 'changes for table added after subscription initialized replicated');
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*) FROM tab_rep_next");
is($result, qq(20),
'changes for table added after subscription initialized replicated');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");

View File

@ -10,16 +10,20 @@ sub wait_for_caught_up
my ($node, $appname) = @_;
$node->poll_query_until('postgres',
"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';")
or die "Timed out while waiting for subscriber to catch up";
"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"
) or die "Timed out while waiting for subscriber to catch up";
}
my $node_publisher = get_new_node('publisher');
$node_publisher->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=UTF8']);
$node_publisher->init(
allows_streaming => 'logical',
extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_publisher->start;
my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=LATIN1']);
$node_subscriber->init(
allows_streaming => 'logical',
extra => [ '--locale=C', '--encoding=LATIN1' ]);
$node_subscriber->start;
my $ddl = "CREATE TABLE test1 (a int, b text);";
@ -27,20 +31,26 @@ $node_publisher->safe_psql('postgres', $ddl);
$node_subscriber->safe_psql('postgres', $ddl);
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
my $appname = 'encoding_test';
my $appname = 'encoding_test';
$node_publisher->safe_psql('postgres', "CREATE PUBLICATION mypub FOR ALL TABLES;");
$node_subscriber->safe_psql('postgres', "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION mypub FOR ALL TABLES;");
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;"
);
wait_for_caught_up($node_publisher, $appname);
$node_publisher->safe_psql('postgres', q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
$node_publisher->safe_psql('postgres',
q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
wait_for_caught_up($node_publisher, $appname);
is($node_subscriber->safe_psql('postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}), # LATIN1
qq(1),
'data replicated to subscriber');
is( $node_subscriber->safe_psql(
'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}
), # LATIN1
qq(1),
'data replicated to subscriber');
$node_subscriber->stop;
$node_publisher->stop;
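For readers wondering why the INSERT uses E'Mot\xc3\xb6rhead' while the check uses E'Mot\xf6rhead': the character "ö" is the byte pair 0xC3 0xB6 in UTF-8 and the single byte 0xF6 in LATIN1, so both literals name the same character data in their respective database encodings. The snippet below is an editorial aside, not part of the commit; it demonstrates the equivalence with the core Encode module.

# Editorial aside: the two byte sequences above decode to the same string.
use strict;
use warnings;
use Encode qw(decode);

my $utf8_bytes   = "Mot\xc3\xb6rhead";    # bytes stored by the UTF8 publisher
my $latin1_bytes = "Mot\xf6rhead";        # bytes matched on the LATIN1 subscriber

my $from_utf8   = decode('UTF-8',      $utf8_bytes);
my $from_latin1 = decode('ISO-8859-1', $latin1_bytes);

print $from_utf8 eq $from_latin1 ? "equivalent\n" : "mismatch\n";    # prints "equivalent"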

View File

@ -36,7 +36,8 @@ exit 0 if $ccode !~ m/^#define YY_FLEX_SUBMINOR_VERSION (\d+)$/m;
exit 0 if $1 >= 36;
# Apply the desired patch.
$ccode =~ s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/
$ccode =~
s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/
.*?)
return yy_is_jam \? 0 : yy_current_state;
|$1

View File

@ -20,12 +20,12 @@ our (@ISA, @EXPORT_OK);
my $insttype;
my @client_contribs = ('oid2name', 'pgbench', 'vacuumlo');
my @client_program_files = (
'clusterdb', 'createdb', 'createuser',
'dropdb', 'dropuser', 'ecpg',
'libecpg', 'libecpg_compat', 'libpgtypes', 'libpq',
'pg_basebackup', 'pg_config', 'pg_dump', 'pg_dumpall',
'pg_isready', 'pg_receivewal', 'pg_recvlogical', 'pg_restore',
'psql', 'reindexdb', 'vacuumdb', @client_contribs);
'clusterdb', 'createdb', 'createuser', 'dropdb',
'dropuser', 'ecpg', 'libecpg', 'libecpg_compat',
'libpgtypes', 'libpq', 'pg_basebackup', 'pg_config',
'pg_dump', 'pg_dumpall', 'pg_isready', 'pg_receivewal',
'pg_recvlogical', 'pg_restore', 'psql', 'reindexdb',
'vacuumdb', @client_contribs);
sub lcopy
{
@ -392,8 +392,8 @@ sub GenerateTimezoneFiles
print "Generating timezone files...";
my @args = ("$conf/zic/zic", '-d', "$target/share/timezone",
'-p', "$posixrules");
my @args =
("$conf/zic/zic", '-d', "$target/share/timezone", '-p', "$posixrules");
foreach (@tzfiles)
{
my $tzfile = $_;

View File

@ -35,8 +35,7 @@ my @contrib_uselibpq = ('dblink', 'oid2name', 'postgres_fdw', 'vacuumlo');
my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo');
my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo');
my $contrib_extralibs = undef;
my $contrib_extraincludes =
{ 'dblink' => ['src/backend'] };
my $contrib_extraincludes = { 'dblink' => ['src/backend'] };
my $contrib_extrasource = {
'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ],
'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], };
@ -54,11 +53,11 @@ my @frontend_uselibpq = ('pg_ctl', 'pg_upgrade', 'pgbench', 'psql', 'initdb');
my @frontend_uselibpgport = (
'pg_archivecleanup', 'pg_test_fsync',
'pg_test_timing', 'pg_upgrade',
'pg_waldump', 'pgbench');
'pg_waldump', 'pgbench');
my @frontend_uselibpgcommon = (
'pg_archivecleanup', 'pg_test_fsync',
'pg_test_timing', 'pg_upgrade',
'pg_waldump', 'pgbench');
'pg_waldump', 'pgbench');
my $frontend_extralibs = {
'initdb' => ['ws2_32.lib'],
'pg_restore' => ['ws2_32.lib'],
@ -72,7 +71,7 @@ my $frontend_extrasource = {
'pgbench' =>
[ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ] };
my @frontend_excludes = (
'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump',
'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump',
'pg_waldump', 'scripts');
sub mkvcbuild
@ -221,7 +220,7 @@ sub mkvcbuild
}
}
die "Unable to find $solution->{options}->{tcl}/lib/tcl<version>.lib"
unless $found;
unless $found;
}
$libpq = $solution->AddProject('libpq', 'dll', 'interfaces',
@ -256,8 +255,7 @@ sub mkvcbuild
$libpqwalreceiver->AddIncludeDir('src/interfaces/libpq');
$libpqwalreceiver->AddReference($postgres, $libpq);
my $pgoutput = $solution->AddProject(
'pgoutput', 'dll', '',
my $pgoutput = $solution->AddProject('pgoutput', 'dll', '',
'src/backend/replication/pgoutput');
$pgoutput->AddReference($postgres);
@ -504,12 +502,14 @@ sub mkvcbuild
'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython',
'plpython' . $pymajorver, 'src/pl/plpython',
'hstore', 'contrib/hstore');
$hstore_plpython->AddDefine('PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
$hstore_plpython->AddDefine(
'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
my $ltree_plpython = AddTransformModule(
'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython',
'plpython' . $pymajorver, 'src/pl/plpython',
'ltree', 'contrib/ltree');
$ltree_plpython->AddDefine('PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
$ltree_plpython->AddDefine(
'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
}
if ($solution->{options}->{perl})
@ -586,15 +586,15 @@ sub mkvcbuild
$plperl->AddReference($postgres);
my $perl_path = $solution->{options}->{perl} . '\lib\CORE\perl*.lib';
my @perl_libs =
grep { /perl\d+.lib$/ }
glob($perl_path);
grep { /perl\d+.lib$/ } glob($perl_path);
if (@perl_libs == 1)
{
$plperl->AddLibrary($perl_libs[0]);
}
else
{
die "could not identify perl library version matching pattern $perl_path\n";
die
"could not identify perl library version matching pattern $perl_path\n";
}
# Add transform module dependent on plperl

View File

@ -156,7 +156,7 @@ sub GenerateFiles
{
s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}$extraver"};
s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}};
s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"};
s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"};
print $o $_;
}
print $o "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
@ -171,7 +171,8 @@ sub GenerateFiles
print $o "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl});
print $o "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
print $o "#define BLCKSZ ", 1024 * $self->{options}->{blocksize}, "\n";
print $o "#define BLCKSZ ", 1024 * $self->{options}->{blocksize},
"\n";
print $o "#define RELSEG_SIZE ",
(1024 / $self->{options}->{blocksize}) *
$self->{options}->{segsize} *
@ -281,7 +282,8 @@ sub GenerateFiles
'src/include/utils/fmgrprotos.h',
'src/backend/utils/fmgrprotos.h'))
{
copyFile('src/backend/utils/fmgrprotos.h',
copyFile(
'src/backend/utils/fmgrprotos.h',
'src/include/utils/fmgrprotos.h');
}
@ -654,7 +656,7 @@ sub GetFakeConfigure
my $self = shift;
my $cfg = '--enable-thread-safety';
$cfg .= ' --enable-cassert' if ($self->{options}->{asserts});
$cfg .= ' --enable-cassert' if ($self->{options}->{asserts});
$cfg .= ' --enable-nls' if ($self->{options}->{nls});
$cfg .= ' --enable-tap-tests' if ($self->{options}->{tap_tests});
$cfg .= ' --with-ldap' if ($self->{options}->{ldap});
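One arithmetic note on the RELSEG_SIZE print statement rewrapped earlier in this file: blocksize is configured in kilobytes and segsize in gigabytes, so blocks per segment come out as (1024 / blocksize) * segsize * 1024. The sketch below only replays that calculation for the defaults; the trailing factor of 1024 is not visible inside the hunk above and is an assumption based on the usual definition, so treat it as illustrative.

# Editorial illustration of the RELSEG_SIZE arithmetic with default options.
use strict;
use warnings;

my $blocksize_kb = 8;    # --with-blocksize, kilobytes per block
my $segsize_gb   = 1;    # --with-segsize, gigabytes per segment
my $relseg_size  = (1024 / $blocksize_kb) * $segsize_gb * 1024;
print "RELSEG_SIZE = $relseg_size blocks per segment\n";    # 131072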

View File

@ -39,9 +39,9 @@ my $vcver = Mkvcbuild::mkvcbuild($config);
# check what sort of build we are doing
my $bconf = $ENV{CONFIG} || "Release";
my $bconf = $ENV{CONFIG} || "Release";
my $msbflags = $ENV{MSBFLAGS} || "";
my $buildwhat = $ARGV[1] || "";
my $buildwhat = $ARGV[1] || "";
if (uc($ARGV[0]) eq 'DEBUG')
{
$bconf = "Debug";
@ -56,7 +56,7 @@ elsif (uc($ARGV[0]) ne "RELEASE")
if ($buildwhat and $vcver >= 10.00)
{
system(
"msbuild $buildwhat.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf"
"msbuild $buildwhat.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf"
);
}
elsif ($buildwhat)
@ -65,7 +65,8 @@ elsif ($buildwhat)
}
else
{
system("msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf");
system(
"msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf");
}
# report status

View File

@ -4,7 +4,7 @@ use warnings;
our $config = {
asserts => 0, # --enable-cassert
# float4byval=>1, # --disable-float4-byval, on by default
# float4byval=>1, # --disable-float4-byval, on by default
# float8byval=> $platformbits == 64, # --disable-float8-byval,
# off by default on 32 bit platforms, on by default on 64 bit platforms

View File

@ -174,7 +174,7 @@ print "Generating $defname.DEF from directory $ARGV[0], platform $platform\n";
my %def = ();
while (<$ARGV[0]/*.obj>) ## no critic (RequireGlobFunction);
while (<$ARGV[0]/*.obj>) ## no critic (RequireGlobFunction);
{
my $objfile = $_;
my $symfile = $objfile;

View File

@ -51,6 +51,7 @@ my $flexflags = ($make =~ /^$basetarg:\s*FLEXFLAGS\s*=\s*(\S.*)/m ? $1 : '');
system("flex $flexflags -o$output $input");
if ($? == 0)
{
# Check for "%option reentrant" in .l file.
my $lfile;
open($lfile, '<', $input) || die "opening $input for reading: $!";
@ -58,12 +59,14 @@ if ($? == 0)
close($lfile);
if ($lcode =~ /\%option\sreentrant/)
{
# Reentrant scanners usually need a fix to prevent
# "unused variable" warnings with older flex versions.
system("perl src\\tools\\fix-old-flex-code.pl $output");
}
else
{
# For non-reentrant scanners we need to fix up the yywrap
# macro definition to keep the MS compiler happy.
# For reentrant scanners (like the core scanner) we do not

View File

@ -182,7 +182,7 @@ sub tap_check
chdir $dir;
my @flags;
@flags = split(/\s+/,$ENV{PROVE_FLAGS}) if exists $ENV{PROVE_FLAGS};
@flags = split(/\s+/, $ENV{PROVE_FLAGS}) if exists $ENV{PROVE_FLAGS};
my @args = ("prove", @flags, "t/*.pl");
@ -526,6 +526,7 @@ sub fetchRegressOpts
$m =~ s{\\\r?\n}{}g;
if ($m =~ /^\s*REGRESS_OPTS\s*\+?=(.*)/m)
{
# Substitute known Makefile variables, then ignore options that retain
# an unhandled variable reference. Ignore anything that isn't an
# option starting with "--".

View File

@ -434,7 +434,7 @@ sub diff
sub run_build
{
eval "use LWP::Simple;"; ## no critic (ProhibitStringyEval);
eval "use LWP::Simple;"; ## no critic (ProhibitStringyEval);
my $code_base = shift || '.';
my $save_dir = getcwd();