Remove dependency on HeapTuple from predicate locking functions.

The following changes make the predicate locking functions more
generic and suitable for use by future access methods:

- PredicateLockTuple() is renamed to PredicateLockTID().  It takes an
  ItemPointer and the inserting transaction ID instead of a HeapTuple.

- CheckForSerializableConflictIn() takes a block number instead of a buffer.

- CheckForSerializableConflictOut() no longer takes a HeapTuple or a buffer.
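
As an illustration only (not part of the original commit message), the new
call patterns look roughly like this; the fragments are modeled on the heapam
and index AM hunks in the diff below:

    /* Tuple-level predicate lock: pass the TID and inserting xid, not the HeapTuple. */
    PredicateLockTID(relation, &tuple->t_self, snapshot,
                     HeapTupleHeaderGetXmin(tuple->t_data));
    /* previously: PredicateLockTuple(relation, tuple, snapshot) */

    /* Conflict-in check: pass a block number, or InvalidBlockNumber for a
     * relation-level check as in heap_insert, instead of a buffer. */
    CheckForSerializableConflictIn(relation, NULL, BufferGetBlockNumber(buffer));
    /* previously: CheckForSerializableConflictIn(relation, NULL, buffer) */

    /* Conflict-out check: heap callers now go through a heap-specific wrapper,
     * which works out the writing transaction's top-level xid and then calls
     * the generic CheckForSerializableConflictOut(relation, xid, snapshot). */
    HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);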

Author: Ashwin Agrawal
Reviewed-by: Andres Freund, Kuntal Ghosh, Thomas Munro
Discussion: https://postgr.es/m/CALfoeiv0k3hkEb3Oqk%3DziWqtyk2Jys1UOK5hwRBNeANT_yX%2Bng%40mail.gmail.com
Thomas Munro 2020-01-28 13:13:04 +13:00
parent 4589c6a2a3
commit 6f38d4dac3
12 changed files with 176 additions and 130 deletions

src/backend/access/gin/ginbtree.c

@ -89,7 +89,7 @@ ginFindLeafPage(GinBtree btree, bool searchMode,
stack->predictNumber = 1;
if (rootConflictCheck)
CheckForSerializableConflictIn(btree->index, NULL, stack->buffer);
CheckForSerializableConflictIn(btree->index, NULL, btree->rootBlkno);
for (;;)
{

src/backend/access/gin/ginfast.c

@ -246,7 +246,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
* tree, so it conflicts with all serializable scans. All scans acquire a
* predicate lock on the metabuffer to represent that.
*/
CheckForSerializableConflictIn(index, NULL, metabuffer);
CheckForSerializableConflictIn(index, NULL, GIN_METAPAGE_BLKNO);
if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GinListPageSize)
{

src/backend/access/gin/gininsert.c

@ -216,7 +216,8 @@ ginEntryInsert(GinState *ginstate,
return;
}
CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
CheckForSerializableConflictIn(ginstate->index, NULL,
BufferGetBlockNumber(stack->buffer));
/* modify an existing leaf entry */
itup = addItemPointersToLeafTuple(ginstate, itup,
items, nitem, buildStats, stack->buffer);
@ -225,7 +226,8 @@ ginEntryInsert(GinState *ginstate,
}
else
{
CheckForSerializableConflictIn(ginstate->index, NULL, stack->buffer);
CheckForSerializableConflictIn(ginstate->index, NULL,
BufferGetBlockNumber(stack->buffer));
/* no match, so construct a new leaf entry */
itup = buildFreshLeafTuple(ginstate, attnum, key, category,
items, nitem, buildStats, stack->buffer);

src/backend/access/gist/gist.c

@ -1264,7 +1264,7 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
* Check for any rw conflicts (in serializable isolation level) just
* before we intend to modify the page
*/
CheckForSerializableConflictIn(state->r, NULL, stack->buffer);
CheckForSerializableConflictIn(state->r, NULL, BufferGetBlockNumber(stack->buffer));
/* Insert the tuple(s) to the page, splitting the page if necessary */
is_split = gistplacetopage(state->r, state->freespace, giststate,

src/backend/access/hash/hashinsert.c

@ -88,7 +88,7 @@ restart_insert:
&usedmetap);
Assert(usedmetap != NULL);
CheckForSerializableConflictIn(rel, NULL, buf);
CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(buf));
/* remember the primary bucket buffer to release the pin on it at end. */
bucket_buf = buf;

src/backend/access/heap/heapam.c

@ -41,6 +41,7 @@
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/subtrans.h"
#include "access/sysattr.h"
#include "access/tableam.h"
#include "access/transam.h"
@ -446,8 +447,8 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
else
valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
&loctup, buffer, snapshot);
HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
&loctup, buffer, snapshot);
if (valid)
scan->rs_vistuples[ntup++] = lineoff;
@ -668,9 +669,9 @@ heapgettup(HeapScanDesc scan,
snapshot,
scan->rs_cbuf);
CheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
tuple, scan->rs_cbuf,
snapshot);
HeapCheckForSerializableConflictOut(valid, scan->rs_base.rs_rd,
tuple, scan->rs_cbuf,
snapshot);
if (valid && key != NULL)
HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
@ -1477,9 +1478,10 @@ heap_fetch(Relation relation,
valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
if (valid)
PredicateLockTuple(relation, tuple, snapshot);
PredicateLockTID(relation, &(tuple->t_self), snapshot,
HeapTupleHeaderGetXmin(tuple->t_data));
CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
@ -1610,13 +1612,14 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
{
/* If it's visible per the snapshot, we must return it */
valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
CheckForSerializableConflictOut(valid, relation, heapTuple,
buffer, snapshot);
HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
buffer, snapshot);
if (valid)
{
ItemPointerSetOffsetNumber(tid, offnum);
PredicateLockTuple(relation, heapTuple, snapshot);
PredicateLockTID(relation, &heapTuple->t_self, snapshot,
HeapTupleHeaderGetXmin(heapTuple->t_data));
if (all_dead)
*all_dead = false;
return true;
@ -1750,7 +1753,7 @@ heap_get_latest_tid(TableScanDesc sscan,
* candidate.
*/
valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
if (valid)
*tid = ctid;
@ -1905,7 +1908,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* lock "gaps" as index page locks do. So we don't need to specify a
* buffer when making the call, which makes for a faster check.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
@ -2159,7 +2162,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
* lock "gaps" as index page locks do. So we don't need to specify a
* buffer when making the call, which makes for a faster check.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
ndone = 0;
while (ndone < ntuples)
@ -2350,7 +2353,7 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
* lock "gaps" as index page locks do. So we don't need to specify a
* buffer when making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
/*
* If tuples are cachable, mark them for invalidation from the caches in
@ -2664,7 +2667,7 @@ l1:
* being visible to the scan (i.e., an exclusive buffer content lock is
* continuously held from this point until the tuple delete is visible).
*/
CheckForSerializableConflictIn(relation, &tp, buffer);
CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
/* replace cid with a combo cid if necessary */
HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
@ -3580,7 +3583,7 @@ l2:
* will include checking the relation level, there is no benefit to a
* separate check for the new tuple.
*/
CheckForSerializableConflictIn(relation, &oldtup, buffer);
CheckForSerializableConflictIn(relation, otid, BufferGetBlockNumber(buffer));
/*
* At this point newbuf and buffer are both pinned and locked, and newbuf
@ -9043,3 +9046,93 @@ heap_mask(char *pagedata, BlockNumber blkno)
}
}
}
/*
* HeapCheckForSerializableConflictOut
* We are reading a tuple which has been modified. If it is visible to
* us but has been deleted, that indicates a rw-conflict out. If it's
* not visible and was created by a concurrent (overlapping)
* serializable transaction, that is also a rw-conflict out.
*
* We will determine the top level xid of the writing transaction with which
* we may be in conflict, and check for overlap with our own transaction.
* If the transactions overlap (i.e., they cannot see each other's writes),
* then we have a conflict out.
*
* This function should be called just about anywhere in heapam.c where a
* tuple has been read. The caller must hold at least a shared lock on the
* buffer, because this function might set hint bits on the tuple. There is
* currently no known reason to call this function from an index AM.
*/
void
HeapCheckForSerializableConflictOut(bool visible, Relation relation,
HeapTuple tuple, Buffer buffer,
Snapshot snapshot)
{
TransactionId xid;
HTSV_Result htsvResult;
if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
return;
/*
* Check to see whether the tuple has been written to by a concurrent
* transaction, either to create it not visible to us, or to delete it
* while it is visible to us. The "visible" bool indicates whether the
* tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
* is going on with it.
*/
htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
switch (htsvResult)
{
case HEAPTUPLE_LIVE:
if (visible)
return;
xid = HeapTupleHeaderGetXmin(tuple->t_data);
break;
case HEAPTUPLE_RECENTLY_DEAD:
if (!visible)
return;
xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
break;
case HEAPTUPLE_INSERT_IN_PROGRESS:
xid = HeapTupleHeaderGetXmin(tuple->t_data);
break;
case HEAPTUPLE_DEAD:
return;
default:
/*
* The only way to get to this default clause is if a new value is
* added to the enum type without adding it to this switch
* statement. That's a bug, so elog.
*/
elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
/*
* In spite of having all enum values covered and calling elog on
* this default, some compilers think this is a code path which
* allows xid to be used below without initialization. Silence
* that warning.
*/
xid = InvalidTransactionId;
}
Assert(TransactionIdIsValid(xid));
Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
/*
* Find top level xid. Bail out if xid is too early to be a conflict, or
* if it's our own xid.
*/
if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
return;
xid = SubTransGetTopmostTransaction(xid);
if (TransactionIdPrecedes(xid, TransactionXmin))
return;
return CheckForSerializableConflictOut(relation, xid, snapshot);
}

src/backend/access/heap/heapam_handler.c

@ -2171,10 +2171,11 @@ heapam_scan_bitmap_next_block(TableScanDesc scan,
if (valid)
{
hscan->rs_vistuples[ntup++] = offnum;
PredicateLockTuple(scan->rs_rd, &loctup, snapshot);
PredicateLockTID(scan->rs_rd, &loctup.t_self, snapshot,
HeapTupleHeaderGetXmin(loctup.t_data));
}
CheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
buffer, snapshot);
HeapCheckForSerializableConflictOut(valid, scan->rs_rd, &loctup,
buffer, snapshot);
}
}
@ -2361,8 +2362,8 @@ heapam_scan_sample_next_tuple(TableScanDesc scan, SampleScanState *scanstate,
/* in pagemode, heapgetpage did this for us */
if (!pagemode)
CheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
hscan->rs_cbuf, scan->rs_snapshot);
HeapCheckForSerializableConflictOut(visible, scan->rs_rd, tuple,
hscan->rs_cbuf, scan->rs_snapshot);
/* Try next tuple from same page. */
if (!visible)

src/backend/access/index/indexam.c

@ -180,8 +180,8 @@ index_insert(Relation indexRelation,
if (!(indexRelation->rd_indam->ampredlocks))
CheckForSerializableConflictIn(indexRelation,
(HeapTuple) NULL,
InvalidBuffer);
(ItemPointer) NULL,
InvalidBlockNumber);
return indexRelation->rd_indam->aminsert(indexRelation, values, isnull,
heap_t_ctid, heapRelation,

src/backend/access/nbtree/nbtinsert.c

@ -285,7 +285,7 @@ top:
* checkingunique and !heapkeyspace cases, but it's okay to use the
* first page the value could be on (with scantid omitted) instead.
*/
CheckForSerializableConflictIn(rel, NULL, insertstate.buf);
CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate.buf));
/*
* Do the insertion. Note that insertstate contains cached binary
@ -528,7 +528,7 @@ _bt_check_unique(Relation rel, BTInsertState insertstate, Relation heapRel,
* otherwise be masked by this unique constraint
* violation.
*/
CheckForSerializableConflictIn(rel, NULL, insertstate->buf);
CheckForSerializableConflictIn(rel, NULL, BufferGetBlockNumber(insertstate->buf));
/*
* This is a definite conflict. Break the tuple down into

src/backend/storage/lmgr/predicate.c

@ -163,8 +163,8 @@
* PredicateLockRelation(Relation relation, Snapshot snapshot)
* PredicateLockPage(Relation relation, BlockNumber blkno,
* Snapshot snapshot)
* PredicateLockTuple(Relation relation, HeapTuple tuple,
* Snapshot snapshot)
* PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot,
* TransactionId insert_xid)
* PredicateLockPageSplit(Relation relation, BlockNumber oldblkno,
* BlockNumber newblkno)
* PredicateLockPageCombine(Relation relation, BlockNumber oldblkno,
@ -173,11 +173,10 @@
* ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe)
*
* conflict detection (may also trigger rollback)
* CheckForSerializableConflictOut(bool visible, Relation relation,
* HeapTupleData *tup, Buffer buffer,
* CheckForSerializableConflictOut(Relation relation, TransactionId xid,
* Snapshot snapshot)
* CheckForSerializableConflictIn(Relation relation, HeapTupleData *tup,
* Buffer buffer)
* CheckForSerializableConflictIn(Relation relation, ItemPointer tid,
* BlockNumber blkno)
* CheckTableForSerializableConflictIn(Relation relation)
*
* final rollback checking
@ -193,8 +192,6 @@
#include "postgres.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/parallel.h"
#include "access/slru.h"
#include "access/subtrans.h"
@ -2538,28 +2535,28 @@ PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot)
}
/*
* PredicateLockTuple
* PredicateLockTID
*
* Gets a predicate lock at the tuple level.
* Skip if not in full serializable transaction isolation level.
* Skip if this is a temporary table.
*/
void
PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot,
TransactionId tuple_xid)
{
PREDICATELOCKTARGETTAG tag;
ItemPointer tid;
if (!SerializationNeededForRead(relation, snapshot))
return;
/*
* If it's a heap tuple, return if this xact wrote it.
* Return if this xact wrote it.
*/
if (relation->rd_index == NULL)
{
/* If we wrote it; we already have a write lock. */
if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data)))
if (TransactionIdIsCurrentTransactionId(tuple_xid))
return;
}
@ -2575,7 +2572,6 @@ PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
if (PredicateLockExists(&tag))
return;
tid = &(tuple->t_self);
SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
relation->rd_node.dbNode,
relation->rd_id,
@ -4020,33 +4016,41 @@ XidIsConcurrent(TransactionId xid)
return false;
}
bool
CheckForSerializableConflictOutNeeded(Relation relation, Snapshot snapshot)
{
if (!SerializationNeededForRead(relation, snapshot))
return false;
/* Check if someone else has already decided that we need to die */
if (SxactIsDoomed(MySerializableXact))
{
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to read/write dependencies among transactions"),
errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
errhint("The transaction might succeed if retried.")));
}
return true;
}
/*
* CheckForSerializableConflictOut
* We are reading a tuple which has been modified. If it is visible to
* us but has been deleted, that indicates a rw-conflict out. If it's
* not visible and was created by a concurrent (overlapping)
* serializable transaction, that is also a rw-conflict out,
* A table AM is reading a tuple that has been modified. After determining
* that it is visible to us, it should call this function with the top
* level xid of the writing transaction.
*
* We will determine the top level xid of the writing transaction with which
* we may be in conflict, and check for overlap with our own transaction.
* If the transactions overlap (i.e., they cannot see each other's writes),
* then we have a conflict out.
*
* This function should be called just about anywhere in heapam.c where a
* tuple has been read. The caller must hold at least a shared lock on the
* buffer, because this function might set hint bits on the tuple. There is
* currently no known reason to call this function from an index AM.
* This function will check for overlap with our own transaction. If the
* transactions overlap (i.e., they cannot see each other's writes), then we
* have a conflict out.
*/
void
CheckForSerializableConflictOut(bool visible, Relation relation,
HeapTuple tuple, Buffer buffer,
Snapshot snapshot)
CheckForSerializableConflictOut(Relation relation, TransactionId xid, Snapshot snapshot)
{
TransactionId xid;
SERIALIZABLEXIDTAG sxidtag;
SERIALIZABLEXID *sxid;
SERIALIZABLEXACT *sxact;
HTSV_Result htsvResult;
if (!SerializationNeededForRead(relation, snapshot))
return;
@ -4060,64 +4064,8 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
errdetail_internal("Reason code: Canceled on identification as a pivot, during conflict out checking."),
errhint("The transaction might succeed if retried.")));
}
/*
* Check to see whether the tuple has been written to by a concurrent
* transaction, either to create it not visible to us, or to delete it
* while it is visible to us. The "visible" bool indicates whether the
* tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
* is going on with it.
*/
htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
switch (htsvResult)
{
case HEAPTUPLE_LIVE:
if (visible)
return;
xid = HeapTupleHeaderGetXmin(tuple->t_data);
break;
case HEAPTUPLE_RECENTLY_DEAD:
if (!visible)
return;
xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
break;
case HEAPTUPLE_DELETE_IN_PROGRESS:
xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
break;
case HEAPTUPLE_INSERT_IN_PROGRESS:
xid = HeapTupleHeaderGetXmin(tuple->t_data);
break;
case HEAPTUPLE_DEAD:
return;
default:
/*
* The only way to get to this default clause is if a new value is
* added to the enum type without adding it to this switch
* statement. That's a bug, so elog.
*/
elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
/*
* In spite of having all enum values covered and calling elog on
* this default, some compilers think this is a code path which
* allows xid to be used below without initialization. Silence
* that warning.
*/
xid = InvalidTransactionId;
}
Assert(TransactionIdIsValid(xid));
Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
/*
* Find top level xid. Bail out if xid is too early to be a conflict, or
* if it's our own xid.
*/
if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
return;
xid = SubTransGetTopmostTransaction(xid);
if (TransactionIdPrecedes(xid, TransactionXmin))
return;
if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
return;
@ -4423,8 +4371,7 @@ CheckTargetForConflictsIn(PREDICATELOCKTARGETTAG *targettag)
* tuple itself.
*/
void
CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
Buffer buffer)
CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
{
PREDICATELOCKTARGETTAG targettag;
@ -4454,22 +4401,22 @@ CheckForSerializableConflictIn(Relation relation, HeapTuple tuple,
* It is not possible to take and hold a lock across the checks for all
* granularities because each target could be in a separate partition.
*/
if (tuple != NULL)
if (tid != NULL)
{
SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
relation->rd_node.dbNode,
relation->rd_id,
ItemPointerGetBlockNumber(&(tuple->t_self)),
ItemPointerGetOffsetNumber(&(tuple->t_self)));
ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
CheckTargetForConflictsIn(&targettag);
}
if (BufferIsValid(buffer))
if (blkno != InvalidBlockNumber)
{
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
relation->rd_node.dbNode,
relation->rd_id,
BufferGetBlockNumber(buffer));
blkno);
CheckTargetForConflictsIn(&targettag);
}

src/include/access/heapam.h

@ -220,5 +220,7 @@ extern bool ResolveCminCmaxDuringDecoding(struct HTAB *tuplecid_data,
HeapTuple htup,
Buffer buffer,
CommandId *cmin, CommandId *cmax);
extern void HeapCheckForSerializableConflictOut(bool valid, Relation relation, HeapTuple tuple,
Buffer buffer, Snapshot snapshot);
#endif /* HEAPAM_H */

src/include/storage/predicate.h

@ -57,16 +57,17 @@ extern void SetSerializableTransactionSnapshot(Snapshot snapshot,
extern void RegisterPredicateLockingXid(TransactionId xid);
extern void PredicateLockRelation(Relation relation, Snapshot snapshot);
extern void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot);
extern void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot);
extern void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot,
TransactionId insert_xid);
extern void PredicateLockPageSplit(Relation relation, BlockNumber oldblkno, BlockNumber newblkno);
extern void PredicateLockPageCombine(Relation relation, BlockNumber oldblkno, BlockNumber newblkno);
extern void TransferPredicateLocksToHeapRelation(Relation relation);
extern void ReleasePredicateLocks(bool isCommit, bool isReadOnlySafe);
/* conflict detection (may also trigger rollback) */
extern void CheckForSerializableConflictOut(bool valid, Relation relation, HeapTuple tuple,
Buffer buffer, Snapshot snapshot);
extern void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer);
extern bool CheckForSerializableConflictOutNeeded(Relation relation, Snapshot snapshot);
extern void CheckForSerializableConflictOut(Relation relation, TransactionId xid, Snapshot snapshot);
extern void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno);
extern void CheckTableForSerializableConflictIn(Relation relation);
/* final rollback checking */
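
Beyond the heap code in this commit, the point of the reworked signatures is
that another table AM can drive serializable conflict checking without
HeapTuples or buffers.  The following is a hedged sketch of what such a
read-side check might look like.  MyAMScanTuple, my_am_tuple_visible() and
my_am_tuple_writer_xid() are hypothetical names invented for illustration;
PredicateLockTID(), CheckForSerializableConflictOutNeeded() and
CheckForSerializableConflictOut() are the functions declared in the hunk
above.

    #include "postgres.h"

    #include "access/transam.h"
    #include "storage/itemptr.h"
    #include "storage/predicate.h"

    /* Hypothetical per-tuple state kept by an imaginary table AM's scan. */
    typedef struct MyAMScanTuple
    {
        ItemPointerData tid;            /* physical location of the tuple version */
        TransactionId   inserting_xid;  /* xid that created this tuple version */
    } MyAMScanTuple;

    /* Hypothetical visibility helpers provided elsewhere by the AM. */
    extern bool my_am_tuple_visible(MyAMScanTuple *tup, Snapshot snapshot);
    extern TransactionId my_am_tuple_writer_xid(MyAMScanTuple *tup, bool visible);

    /*
     * Sketch of a read-side SSI check: lock what we read, then report the
     * top-level xid of any concurrent writer to the generic conflict-out
     * check, much as HeapCheckForSerializableConflictOut() does for heap.
     */
    static void
    my_am_check_tuple(Relation rel, MyAMScanTuple *tup, Snapshot snapshot)
    {
        bool            visible = my_am_tuple_visible(tup, snapshot);
        TransactionId   writer;

        /* Tuple-level SIREAD lock, now identified by TID plus inserting xid. */
        if (visible)
            PredicateLockTID(rel, &tup->tid, snapshot, tup->inserting_xid);

        /* Cheap exit when serializable conflict-out checking is not needed. */
        if (!CheckForSerializableConflictOutNeeded(rel, snapshot))
            return;

        /*
         * The AM, not predicate.c, decides which transaction counts as the
         * writer: the deleter if the tuple is visible to us but being
         * removed, the inserter if it is not (yet) visible.  The xid must be
         * resolved to its top-level transaction (heapam uses
         * SubTransGetTopmostTransaction for this); InvalidTransactionId here
         * means there is no concurrent writer to worry about.
         */
        writer = my_am_tuple_writer_xid(tup, visible);
        if (TransactionIdIsValid(writer))
            CheckForSerializableConflictOut(rel, writer, snapshot);
    }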