/*-------------------------------------------------------------------------
 *
 * slotfuncs.c
 *     Support functions for replication slots
 *
 * Copyright (c) 2012-2020, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *     src/backend/replication/slotfuncs.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "access/xlog_internal.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "replication/decode.h"
#include "replication/logical.h"
#include "replication/logicalfuncs.h"
#include "replication/slot.h"
#include "utils/builtins.h"
#include "utils/inval.h"
#include "utils/pg_lsn.h"
#include "utils/resowner.h"

static void
check_permissions(void)
{
    if (!superuser() && !has_rolreplication(GetUserId()))
        ereport(ERROR,
                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                 (errmsg("must be superuser or replication role to use replication slots"))));
}

/*
 * Helper function for creating a new physical replication slot with
 * the given arguments.  Note that this function doesn't release the created
 * slot.
 *
 * If restart_lsn is a valid value, we use it without going through the WAL
 * reservation routine, so the caller must guarantee that WAL is available.
 */
static void
create_physical_replication_slot(char *name, bool immediately_reserve,
                                 bool temporary, XLogRecPtr restart_lsn)
{
    Assert(!MyReplicationSlot);

    /* acquire replication slot, this will check for conflicting names */
    ReplicationSlotCreate(name, false,
                          temporary ? RS_TEMPORARY : RS_PERSISTENT);

    if (immediately_reserve)
    {
        /* Reserve WAL as the user asked for it */
        if (XLogRecPtrIsInvalid(restart_lsn))
            ReplicationSlotReserveWal();
        else
            MyReplicationSlot->data.restart_lsn = restart_lsn;

        /* Write this slot to disk */
        ReplicationSlotMarkDirty();
        ReplicationSlotSave();
    }
}

/*
 * SQL function for creating a new physical (streaming replication)
 * replication slot.
 */
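/*
 * Illustrative SQL usage (the slot name below is arbitrary):
 *
 *     SELECT * FROM pg_create_physical_replication_slot('myslot', true, false);
 *
 * The boolean arguments map to immediately_reserve and temporary below; the
 * returned lsn column is NULL unless WAL was reserved right away.
 */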
Datum
pg_create_physical_replication_slot(PG_FUNCTION_ARGS)
{
    Name        name = PG_GETARG_NAME(0);
    bool        immediately_reserve = PG_GETARG_BOOL(1);
    bool        temporary = PG_GETARG_BOOL(2);
    Datum       values[2];
    bool        nulls[2];
    TupleDesc   tupdesc;
    HeapTuple   tuple;
    Datum       result;

    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    check_permissions();

    CheckSlotRequirements();

    create_physical_replication_slot(NameStr(*name),
                                     immediately_reserve,
                                     temporary,
                                     InvalidXLogRecPtr);

    values[0] = NameGetDatum(&MyReplicationSlot->data.name);
    nulls[0] = false;

    if (immediately_reserve)
    {
        values[1] = LSNGetDatum(MyReplicationSlot->data.restart_lsn);
        nulls[1] = false;
    }
    else
        nulls[1] = true;

    tuple = heap_form_tuple(tupdesc, values, nulls);
    result = HeapTupleGetDatum(tuple);

    ReplicationSlotRelease();

    PG_RETURN_DATUM(result);
}

/*
 * Helper function for creating a new logical replication slot with
 * the given arguments.  Note that this function doesn't release the created
 * slot.
 */
static void
create_logical_replication_slot(char *name, char *plugin,
                                bool temporary, XLogRecPtr restart_lsn)
{
    LogicalDecodingContext *ctx = NULL;

    Assert(!MyReplicationSlot);

    /*
     * Acquire a logical decoding slot, this will check for conflicting names.
     * Initially create persistent slot as ephemeral - that allows us to
     * nicely handle errors during initialization because it'll get dropped if
     * this transaction fails. We'll make it persistent at the end. Temporary
     * slots can be created as temporary from beginning as they get dropped on
     * error as well.
     */
    ReplicationSlotCreate(name, true,
                          temporary ? RS_TEMPORARY : RS_EPHEMERAL);

    /*
     * Create logical decoding context, to build the initial snapshot.
     */
    ctx = CreateInitDecodingContext(plugin, NIL,
                                    false,  /* do not build snapshot */
                                    restart_lsn,
                                    logical_read_local_xlog_page, NULL, NULL,
                                    NULL);

    /* build initial snapshot, might take a while */
    DecodingContextFindStartpoint(ctx);

    /* don't need the decoding context anymore */
    FreeDecodingContext(ctx);
}

/*
 * SQL function for creating a new logical replication slot.
 */
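/*
 * Illustrative SQL usage (slot and plugin names are arbitrary, though
 * test_decoding ships with the core distribution):
 *
 *     SELECT * FROM pg_create_logical_replication_slot('myslot', 'test_decoding', false);
 *
 * The returned lsn column is the new slot's confirmed_flush position.
 */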
Datum
pg_create_logical_replication_slot(PG_FUNCTION_ARGS)
{
    Name        name = PG_GETARG_NAME(0);
    Name        plugin = PG_GETARG_NAME(1);
    bool        temporary = PG_GETARG_BOOL(2);
    Datum       result;
    TupleDesc   tupdesc;
    HeapTuple   tuple;
    Datum       values[2];
    bool        nulls[2];

    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    check_permissions();

    CheckLogicalDecodingRequirements();

    create_logical_replication_slot(NameStr(*name),
                                    NameStr(*plugin),
                                    temporary,
                                    InvalidXLogRecPtr);

    values[0] = NameGetDatum(&MyReplicationSlot->data.name);
    values[1] = LSNGetDatum(MyReplicationSlot->data.confirmed_flush);

    memset(nulls, 0, sizeof(nulls));

    tuple = heap_form_tuple(tupdesc, values, nulls);
    result = HeapTupleGetDatum(tuple);

    /* ok, slot is now fully created, mark it as persistent if needed */
    if (!temporary)
        ReplicationSlotPersist();
    ReplicationSlotRelease();

    PG_RETURN_DATUM(result);
}

/*
 * SQL function for dropping a replication slot.
 */
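/*
 * Illustrative SQL usage (the slot name is arbitrary):
 *
 *     SELECT pg_drop_replication_slot('myslot');
 */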
Datum
pg_drop_replication_slot(PG_FUNCTION_ARGS)
{
    Name        name = PG_GETARG_NAME(0);

    check_permissions();

    CheckSlotRequirements();

    ReplicationSlotDrop(NameStr(*name), true);

    PG_RETURN_VOID();
}

/*
 * pg_get_replication_slots - SQL SRF showing active replication slots.
 */
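/*
 * This set-returning function is normally consumed through the
 * pg_replication_slots system view, e.g. (illustrative):
 *
 *     SELECT slot_name, slot_type, active, restart_lsn
 *       FROM pg_replication_slots;
 */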
Datum
pg_get_replication_slots(PG_FUNCTION_ARGS)
{
#define PG_GET_REPLICATION_SLOTS_COLS 11
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    Tuplestorestate *tupstore;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    int         slotno;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not allowed in this context")));

    /* Build a tuple descriptor for our result type */
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    /*
     * We don't require any special permission to see this function's data
     * because nothing should be sensitive. The most critical being the slot
     * name, which shouldn't contain anything particularly sensitive.
     */

    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    tupstore = tuplestore_begin_heap(true, false, work_mem);
    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    MemoryContextSwitchTo(oldcontext);

    LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
    for (slotno = 0; slotno < max_replication_slots; slotno++)
    {
        ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[slotno];
        Datum       values[PG_GET_REPLICATION_SLOTS_COLS];
        bool        nulls[PG_GET_REPLICATION_SLOTS_COLS];
        ReplicationSlotPersistency persistency;
        TransactionId xmin;
        TransactionId catalog_xmin;
        XLogRecPtr  restart_lsn;
        XLogRecPtr  confirmed_flush_lsn;
        pid_t       active_pid;
        Oid         database;
        NameData    slot_name;
        NameData    plugin;
        int         i;

        if (!slot->in_use)
            continue;

        SpinLockAcquire(&slot->mutex);
        xmin = slot->data.xmin;
        catalog_xmin = slot->data.catalog_xmin;
        database = slot->data.database;
        restart_lsn = slot->data.restart_lsn;
        confirmed_flush_lsn = slot->data.confirmed_flush;
        namecpy(&slot_name, &slot->data.name);
        namecpy(&plugin, &slot->data.plugin);
        active_pid = slot->active_pid;
        persistency = slot->data.persistency;
        SpinLockRelease(&slot->mutex);

        memset(nulls, 0, sizeof(nulls));

        i = 0;
        values[i++] = NameGetDatum(&slot_name);

        if (database == InvalidOid)
            nulls[i++] = true;
        else
            values[i++] = NameGetDatum(&plugin);

        if (database == InvalidOid)
            values[i++] = CStringGetTextDatum("physical");
        else
            values[i++] = CStringGetTextDatum("logical");

        if (database == InvalidOid)
            nulls[i++] = true;
        else
            values[i++] = ObjectIdGetDatum(database);

        values[i++] = BoolGetDatum(persistency == RS_TEMPORARY);
        values[i++] = BoolGetDatum(active_pid != 0);

        if (active_pid != 0)
            values[i++] = Int32GetDatum(active_pid);
        else
            nulls[i++] = true;

        if (xmin != InvalidTransactionId)
            values[i++] = TransactionIdGetDatum(xmin);
        else
            nulls[i++] = true;

        if (catalog_xmin != InvalidTransactionId)
            values[i++] = TransactionIdGetDatum(catalog_xmin);
        else
            nulls[i++] = true;

        if (restart_lsn != InvalidXLogRecPtr)
            values[i++] = LSNGetDatum(restart_lsn);
        else
            nulls[i++] = true;

        if (confirmed_flush_lsn != InvalidXLogRecPtr)
            values[i++] = LSNGetDatum(confirmed_flush_lsn);
        else
            nulls[i++] = true;

        tuplestore_putvalues(tupstore, tupdesc, values, nulls);
    }
    LWLockRelease(ReplicationSlotControlLock);

    tuplestore_donestoring(tupstore);

    return (Datum) 0;
}

/*
 * Helper function for advancing our physical replication slot forward.
 *
 * The LSN position to move to is compared simply to the slot's restart_lsn,
 * knowing that any position older than that would be removed by successive
 * checkpoints.
 */
static XLogRecPtr
pg_physical_replication_slot_advance(XLogRecPtr moveto)
{
    XLogRecPtr  startlsn = MyReplicationSlot->data.restart_lsn;
    XLogRecPtr  retlsn = startlsn;

    if (startlsn < moveto)
    {
        SpinLockAcquire(&MyReplicationSlot->mutex);
        MyReplicationSlot->data.restart_lsn = moveto;
        SpinLockRelease(&MyReplicationSlot->mutex);
        retlsn = moveto;
    }

    return retlsn;
}

/*
 * Helper function for advancing our logical replication slot forward.
 *
 * The slot's restart_lsn is used as start point for reading records, while
 * confirmed_flush is used as base point for the decoding context.
 *
 * We cannot just do LogicalConfirmReceivedLocation to update confirmed_flush,
 * because we need to digest WAL to advance restart_lsn, which allows WAL to
 * be recycled and old catalog tuples to be removed. As decoding is done in
 * fast_forward mode, no changes are generated anyway.
 */
static XLogRecPtr
pg_logical_replication_slot_advance(XLogRecPtr moveto)
{
    LogicalDecodingContext *ctx;
    ResourceOwner old_resowner = CurrentResourceOwner;
    XLogRecPtr  retlsn;

    PG_TRY();
    {
        /*
         * Create our decoding context in fast_forward mode, passing start_lsn
         * as InvalidXLogRecPtr, so that we start processing from my slot's
         * confirmed_flush.
         */
        ctx = CreateDecodingContext(InvalidXLogRecPtr,
                                    NIL,
                                    true,   /* fast_forward */
                                    logical_read_local_xlog_page,
                                    NULL, NULL, NULL);

        /*
         * Start reading at the slot's restart_lsn, which we know to point to
         * a valid record.
         */
        XLogBeginRead(ctx->reader, MyReplicationSlot->data.restart_lsn);

        /* Initialize our return value in case we don't do anything */
        retlsn = MyReplicationSlot->data.confirmed_flush;

        /* invalidate non-timetravel entries */
        InvalidateSystemCaches();

        /* Decode at least one record, until we run out of records */
        while (ctx->reader->EndRecPtr < moveto)
        {
            char       *errm = NULL;
            XLogRecord *record;

            /*
             * Read records. No changes are generated in fast_forward mode,
             * but snapbuilder/slot statuses are updated properly.
             */
            record = XLogReadRecord(ctx->reader, &errm);
            if (errm)
                elog(ERROR, "%s", errm);

            /*
             * Process the record. Storage-level changes are ignored in
             * fast_forward mode, but other modules (such as snapbuilder)
             * might still have critical updates to do.
             */
            if (record)
                LogicalDecodingProcessRecord(ctx, ctx->reader);

            /* Stop once the requested target has been reached */
            if (moveto <= ctx->reader->EndRecPtr)
                break;

            CHECK_FOR_INTERRUPTS();
        }

        /*
         * Logical decoding could have clobbered CurrentResourceOwner during
         * transaction management, so restore the executor's value. (This is
         * a kluge, but it's not worth cleaning up right now.)
         */
        CurrentResourceOwner = old_resowner;

        if (ctx->reader->EndRecPtr != InvalidXLogRecPtr)
        {
            LogicalConfirmReceivedLocation(moveto);

            /*
             * If only the confirmed_flush LSN has changed the slot won't get
             * marked as dirty by the above. Callers on the walsender
             * interface are expected to keep track of their own progress and
             * don't need it written out. But SQL-interface users cannot
             * specify their own start positions and it's harder for them to
             * keep track of their progress, so we should make more of an
             * effort to save it for them.
             *
             * Dirty the slot so it's written out at the next checkpoint.
             * We'll still lose its position on crash, as documented, but it's
             * better than always losing the position even on clean restart.
             */
            ReplicationSlotMarkDirty();
        }

        retlsn = MyReplicationSlot->data.confirmed_flush;

        /* free context, call shutdown callback */
        FreeDecodingContext(ctx);

        InvalidateSystemCaches();
    }
    PG_CATCH();
    {
        /* clear all timetravel entries */
        InvalidateSystemCaches();
        PG_RE_THROW();
    }
    PG_END_TRY();

    return retlsn;
}

/*
 * SQL function for moving the position in a replication slot.
 */
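/*
 * Illustrative SQL usage (slot name and target LSN are arbitrary):
 *
 *     SELECT * FROM pg_replication_slot_advance('myslot', '0/5000000'::pg_lsn);
 *
 * The returned LSN is the position the slot actually ended up at, which may
 * be clamped to what has been flushed or replayed locally.
 */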
Datum
pg_replication_slot_advance(PG_FUNCTION_ARGS)
{
    Name        slotname = PG_GETARG_NAME(0);
    XLogRecPtr  moveto = PG_GETARG_LSN(1);
    XLogRecPtr  endlsn;
    XLogRecPtr  minlsn;
    TupleDesc   tupdesc;
    Datum       values[2];
    bool        nulls[2];
    HeapTuple   tuple;
    Datum       result;

    Assert(!MyReplicationSlot);

    check_permissions();

    if (XLogRecPtrIsInvalid(moveto))
        ereport(ERROR,
                (errmsg("invalid target WAL LSN")));

    /* Build a tuple descriptor for our result type */
    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    /*
     * We can't move slot past what's been flushed/replayed so clamp the
     * target position accordingly.
     */
    if (!RecoveryInProgress())
        moveto = Min(moveto, GetFlushRecPtr());
    else
        moveto = Min(moveto, GetXLogReplayRecPtr(&ThisTimeLineID));

    /* Acquire the slot so we "own" it */
    ReplicationSlotAcquire(NameStr(*slotname), true);

    /* A slot whose restart_lsn has never been reserved cannot be advanced */
    if (XLogRecPtrIsInvalid(MyReplicationSlot->data.restart_lsn))
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("cannot advance replication slot that has not previously reserved WAL")));

    /*
     * Check if the slot is not moving backwards. Physical slots rely simply
     * on restart_lsn as a minimum point, while logical slots have confirmed
     * consumption up to confirmed_flush, meaning that in both cases data
     * older than that is not available anymore.
     */
    if (OidIsValid(MyReplicationSlot->data.database))
        minlsn = MyReplicationSlot->data.confirmed_flush;
    else
        minlsn = MyReplicationSlot->data.restart_lsn;

    if (moveto < minlsn)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("cannot advance replication slot to %X/%X, minimum is %X/%X",
                        (uint32) (moveto >> 32), (uint32) moveto,
                        (uint32) (minlsn >> 32), (uint32) minlsn)));

    /* Do the actual slot update, depending on the slot type */
    if (OidIsValid(MyReplicationSlot->data.database))
        endlsn = pg_logical_replication_slot_advance(moveto);
    else
        endlsn = pg_physical_replication_slot_advance(moveto);

    values[0] = NameGetDatum(&MyReplicationSlot->data.name);
    nulls[0] = false;

    /* Update the on-disk state when the lsn was updated. */
    if (!XLogRecPtrIsInvalid(endlsn))
    {
        ReplicationSlotMarkDirty();
        ReplicationSlotsComputeRequiredXmin(false);
        ReplicationSlotsComputeRequiredLSN();
        ReplicationSlotSave();
    }

    ReplicationSlotRelease();

    /* Return the reached position. */
    values[1] = LSNGetDatum(endlsn);
    nulls[1] = false;

    tuple = heap_form_tuple(tupdesc, values, nulls);
    result = HeapTupleGetDatum(tuple);

    PG_RETURN_DATUM(result);
}

/*
 * Helper function for copying a replication slot.
 */
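/*
 * Illustrative SQL usage of the copy functions built on this helper (slot
 * names are arbitrary):
 *
 *     SELECT * FROM pg_copy_physical_replication_slot('src_slot', 'dst_slot');
 *     SELECT * FROM pg_copy_logical_replication_slot('src_slot', 'dst_slot',
 *                                                    false, 'test_decoding');
 *
 * The optional arguments override the temporary flag and, for logical slots,
 * the output plugin of the copy.
 */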
static Datum
copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot)
{
    Name        src_name = PG_GETARG_NAME(0);
    Name        dst_name = PG_GETARG_NAME(1);
    ReplicationSlot *src = NULL;
    XLogRecPtr  src_restart_lsn;
    bool        src_islogical;
    bool        temporary;
    char       *plugin;
    Datum       values[2];
    bool        nulls[2];
    Datum       result;
    TupleDesc   tupdesc;
    HeapTuple   tuple;

    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        elog(ERROR, "return type must be a row type");

    check_permissions();

    if (logical_slot)
        CheckLogicalDecodingRequirements();
    else
        CheckSlotRequirements();

    LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);

    /*
     * We need to prevent the source slot's reserved WAL from being removed,
     * but we don't want to lock that slot for very long, and it can advance
     * in the meantime. So obtain the source slot's data, and create a new
     * slot using its restart_lsn. Afterwards we lock the source slot again
     * and verify that the data we copied (name, type) has not changed
     * incompatibly. No inconvenient WAL removal can occur once the new slot
     * is created -- but since WAL removal could have occurred before we
     * managed to create the new slot, we advance the new slot's restart_lsn
     * to the source slot's updated restart_lsn the second time we lock it.
     */
    for (int i = 0; i < max_replication_slots; i++)
    {
        ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];

        if (s->in_use && strcmp(NameStr(s->data.name), NameStr(*src_name)) == 0)
        {
            SpinLockAcquire(&s->mutex);
            src_islogical = SlotIsLogical(s);
            src_restart_lsn = s->data.restart_lsn;
            temporary = s->data.persistency == RS_TEMPORARY;
            plugin = logical_slot ? pstrdup(NameStr(s->data.plugin)) : NULL;
            SpinLockRelease(&s->mutex);

            src = s;
            break;
        }
    }

    LWLockRelease(ReplicationSlotControlLock);

    if (src == NULL)
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_OBJECT),
                 errmsg("replication slot \"%s\" does not exist", NameStr(*src_name))));

    /* Check type of replication slot */
    if (src_islogical != logical_slot)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 src_islogical ?
                 errmsg("cannot copy physical replication slot \"%s\" as a logical replication slot",
                        NameStr(*src_name)) :
                 errmsg("cannot copy logical replication slot \"%s\" as a physical replication slot",
                        NameStr(*src_name))));

    /* Copying non-reserved slot doesn't make sense */
    if (XLogRecPtrIsInvalid(src_restart_lsn))
    {
        Assert(!logical_slot);
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 (errmsg("cannot copy a replication slot that doesn't reserve WAL"))));
    }

    /* Overwrite params from optional arguments */
    if (PG_NARGS() >= 3)
        temporary = PG_GETARG_BOOL(2);
    if (PG_NARGS() >= 4)
    {
        Assert(logical_slot);
        plugin = NameStr(*(PG_GETARG_NAME(3)));
    }

    /* Create new slot and acquire it */
    if (logical_slot)
        create_logical_replication_slot(NameStr(*dst_name),
                                        plugin,
                                        temporary,
                                        src_restart_lsn);
    else
        create_physical_replication_slot(NameStr(*dst_name),
                                         true,
                                         temporary,
                                         src_restart_lsn);

    /*
     * Update the destination slot to current values of the source slot;
     * recheck that the source slot is still the one we saw previously.
     */
    {
        TransactionId copy_effective_xmin;
        TransactionId copy_effective_catalog_xmin;
        TransactionId copy_xmin;
        TransactionId copy_catalog_xmin;
        XLogRecPtr  copy_restart_lsn;
        bool        copy_islogical;
        char       *copy_name;

        /* Copy data of source slot again */
        SpinLockAcquire(&src->mutex);
        copy_effective_xmin = src->effective_xmin;
        copy_effective_catalog_xmin = src->effective_catalog_xmin;

        copy_xmin = src->data.xmin;
        copy_catalog_xmin = src->data.catalog_xmin;
        copy_restart_lsn = src->data.restart_lsn;

        /* for existence check */
        copy_name = pstrdup(NameStr(src->data.name));
        copy_islogical = SlotIsLogical(src);
        SpinLockRelease(&src->mutex);

        /*
         * Check if the source slot still exists and is valid. We regard it as
         * invalid if the type of replication slot or name has been changed,
         * or the restart_lsn either is invalid or has gone backward. (The
         * restart_lsn could go backwards if the source slot is dropped and
         * copied from an older slot during installation.)
         *
         * Since erroring out will release and drop the destination slot we
         * don't need to release it here.
         */
        if (copy_restart_lsn < src_restart_lsn ||
            src_islogical != copy_islogical ||
            strcmp(copy_name, NameStr(*src_name)) != 0)
            ereport(ERROR,
                    (errmsg("could not copy replication slot \"%s\"",
                            NameStr(*src_name)),
                     errdetail("The source replication slot was modified incompatibly during the copy operation.")));

        /* Install copied values again */
        SpinLockAcquire(&MyReplicationSlot->mutex);
        MyReplicationSlot->effective_xmin = copy_effective_xmin;
        MyReplicationSlot->effective_catalog_xmin = copy_effective_catalog_xmin;

        MyReplicationSlot->data.xmin = copy_xmin;
        MyReplicationSlot->data.catalog_xmin = copy_catalog_xmin;
        MyReplicationSlot->data.restart_lsn = copy_restart_lsn;
        SpinLockRelease(&MyReplicationSlot->mutex);

        ReplicationSlotMarkDirty();
        ReplicationSlotsComputeRequiredXmin(false);
        ReplicationSlotsComputeRequiredLSN();
        ReplicationSlotSave();

#ifdef USE_ASSERT_CHECKING
        /* Check that the restart_lsn is available */
        {
            XLogSegNo   segno;

            XLByteToSeg(copy_restart_lsn, segno, wal_segment_size);
            Assert(XLogGetLastRemovedSegno() < segno);
        }
#endif
    }

    /* target slot fully created, mark as persistent if needed */
    if (logical_slot && !temporary)
        ReplicationSlotPersist();

    /* All done. Set up the return values */
    values[0] = NameGetDatum(dst_name);
    nulls[0] = false;
    if (!XLogRecPtrIsInvalid(MyReplicationSlot->data.confirmed_flush))
    {
        values[1] = LSNGetDatum(MyReplicationSlot->data.confirmed_flush);
        nulls[1] = false;
    }
    else
        nulls[1] = true;

    tuple = heap_form_tuple(tupdesc, values, nulls);
    result = HeapTupleGetDatum(tuple);

    ReplicationSlotRelease();

    PG_RETURN_DATUM(result);
}

/* The wrappers below are all to appease opr_sanity */
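/*
 * Each wrapper below backs one of the SQL-level argument lists (two, three,
 * or four arguments for the logical copy; two or three for the physical
 * copy); all of them funnel into copy_replication_slot() above, which checks
 * PG_NARGS() to pick up the optional arguments.
 */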
Datum
pg_copy_logical_replication_slot_a(PG_FUNCTION_ARGS)
{
    return copy_replication_slot(fcinfo, true);
}

Datum
pg_copy_logical_replication_slot_b(PG_FUNCTION_ARGS)
{
    return copy_replication_slot(fcinfo, true);
}

Datum
pg_copy_logical_replication_slot_c(PG_FUNCTION_ARGS)
{
    return copy_replication_slot(fcinfo, true);
}

Datum
pg_copy_physical_replication_slot_a(PG_FUNCTION_ARGS)
{
    return copy_replication_slot(fcinfo, false);
}

Datum
pg_copy_physical_replication_slot_b(PG_FUNCTION_ARGS)
{
    return copy_replication_slot(fcinfo, false);
}