tableam: Add table_multi_insert() and revamp/speed-up COPY FROM buffering.

This adds table_multi_insert(), and converts COPY FROM, the only user
of heap_multi_insert, to it.

A simple conversion of COPY FROM to use slots would have yielded a
slowdown when inserting into a partitioned table for some
workloads. Different partitions might need different slots (both slot
types and their descriptors), and the overhead of dropping / creating
slots when there are constant partition changes is measurable.

Thus instead revamp the COPY FROM buffering for partitioned tables to
allow buffering inserts into multiple tables, flushing only when
limits are reached across all partition buffers. By only dropping
slots when there have been inserts into too many different partitions,
the aforementioned overhead is gone. By allowing larger batches, even
when there are frequent partition changes, we actually speed such
cases up significantly.

By using slots, COPY of very narrow rows into unlogged / temporary
tables might slow down very slightly (due to the indirect function calls).

Author: David Rowley, Andres Freund, Haribabu Kommi
Discussion:
    https://postgr.es/m/20180703070645.wchpu5muyto5n647@alap3.anarazel.de
    https://postgr.es/m/20190327054923.t3epfuewxfqdt22e@alap3.anarazel.de
Committed by Andres Freund on 2019-04-04 15:47:19 -07:00
parent 7bac3acab4
commit 86b85044e8
9 changed files with 587 additions and 366 deletions


@@ -2106,7 +2106,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
  * temporary context before calling this, if that's a problem.
  */
 void
-heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
+heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
                   CommandId cid, int options, BulkInsertState bistate)
 {
     TransactionId xid = GetCurrentTransactionId();
@@ -2127,11 +2127,18 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
     saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
                                                    HEAP_DEFAULT_FILLFACTOR);
 
-    /* Toast and set header data in all the tuples */
+    /* Toast and set header data in all the slots */
     heaptuples = palloc(ntuples * sizeof(HeapTuple));
     for (i = 0; i < ntuples; i++)
-        heaptuples[i] = heap_prepare_insert(relation, tuples[i],
-                                            xid, cid, options);
+    {
+        HeapTuple   tuple;
+
+        tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
+        slots[i]->tts_tableOid = RelationGetRelid(relation);
+        tuple->t_tableOid = slots[i]->tts_tableOid;
+        heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
+                                            options);
+    }
 
     /*
      * We're about to do the actual inserts -- but check for conflict first,
@@ -2361,13 +2368,9 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
             CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
     }
 
-    /*
-     * Copy t_self fields back to the caller's original tuples. This does
-     * nothing for untoasted tuples (tuples[i] == heaptuples[i)], but it's
-     * probably faster to always copy than check.
-     */
+    /* copy t_self fields back to the caller's slots */
     for (i = 0; i < ntuples; i++)
-        tuples[i]->t_self = heaptuples[i]->t_self;
+        slots[i]->tts_tid = heaptuples[i]->t_self;
 
     pgstat_count_heap_insert(relation, ntuples);
 }


@@ -2516,6 +2516,7 @@ static const TableAmRoutine heapam_methods = {
     .tuple_insert = heapam_tuple_insert,
     .tuple_insert_speculative = heapam_tuple_insert_speculative,
     .tuple_complete_speculative = heapam_tuple_complete_speculative,
+    .multi_insert = heap_multi_insert,
     .tuple_delete = heapam_tuple_delete,
     .tuple_update = heapam_tuple_update,
     .tuple_lock = heapam_tuple_lock,
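
For context, the heap AM simply points the new callback at heap_multi_insert, as the hunk above shows. Any other table AM has to supply its own implementation; the sketch below is purely illustrative (the "myam" name, its single-tuple insert routine, and the loop-based approach are assumptions, not part of this commit):

#include "postgres.h"
#include "access/tableam.h"
#include "executor/tuptable.h"

/* assumed single-tuple insert routine of this hypothetical AM */
static void myam_tuple_insert(Relation rel, TupleTableSlot *slot,
                              CommandId cid, int options,
                              struct BulkInsertStateData *bistate);

/* minimal multi_insert: just loop over the single-tuple path */
static void
myam_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
                  CommandId cid, int options,
                  struct BulkInsertStateData *bistate)
{
    for (int i = 0; i < nslots; i++)
        myam_tuple_insert(rel, slots[i], cid, options, bistate);
}

static const TableAmRoutine myam_methods = {
    .type = T_TableAmRoutine,
    /* ... other callbacks elided for brevity ... */
    .multi_insert = myam_multi_insert,
};

A real AM would of course batch the work itself (that is the point of the callback), but even a loop like this keeps the interface uniform for callers such as COPY FROM.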

File diff suppressed because it is too large.
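
The suppressed diff is presumably the COPY FROM code itself, where the multi-table buffering described in the commit message is implemented. The sketch below is illustrative only: the CopyMultiInsertBuffer / CopyMultiInsertInfo names appear in typedefs.list at the end of this commit, but the fields, limits, and flush helper shown here are assumptions rather than the committed code.

#include "postgres.h"
#include "access/heapam.h"        /* BulkInsertState */
#include "access/tableam.h"       /* table_multi_insert() */
#include "executor/tuptable.h"    /* TupleTableSlot */
#include "nodes/execnodes.h"      /* ResultRelInfo */
#include "nodes/pg_list.h"

#define MY_MAX_BUFFERED_TUPLES  1000    /* assumed per-buffer tuple limit */
#define MY_MAX_BUFFERED_BYTES   65535   /* assumed combined byte limit */

/* one buffer of slots per recently used target partition */
typedef struct CopyMultiInsertBuffer
{
    TupleTableSlot *slots[MY_MAX_BUFFERED_TUPLES];  /* tuples for this partition */
    ResultRelInfo  *resultRelInfo;                  /* target relation */
    BulkInsertState bistate;                        /* bulk-insert state for it */
    int             nused;                          /* slots currently filled */
} CopyMultiInsertBuffer;

/* counters shared across all partition buffers */
typedef struct CopyMultiInsertInfo
{
    List       *multiInsertBuffers; /* list of CopyMultiInsertBuffer */
    int         bufferedTuples;     /* tuples buffered over all buffers */
    int         bufferedBytes;      /* bytes buffered over all buffers */
} CopyMultiInsertInfo;

/*
 * Flush every buffer once the combined limits are hit, so frequent partition
 * switches no longer force tiny batches or constant slot re-creation.
 */
static void
my_flush_all_buffers(CopyMultiInsertInfo *miinfo, CommandId mycid, int ti_options)
{
    ListCell   *lc;

    foreach(lc, miinfo->multiInsertBuffers)
    {
        CopyMultiInsertBuffer *buffer = (CopyMultiInsertBuffer *) lfirst(lc);

        if (buffer->nused > 0)
            table_multi_insert(buffer->resultRelInfo->ri_RelationDesc,
                               buffer->slots, buffer->nused,
                               mycid, ti_options, buffer->bistate);
        buffer->nused = 0;
    }
    miinfo->bufferedTuples = 0;
    miinfo->bufferedBytes = 0;
}

The key design point, per the commit message, is that the flush decision is driven by totals across all partition buffers, so slots are only dropped once too many different partitions have been touched.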


@@ -1346,6 +1346,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
     resultRelInfo->ri_PartitionCheck = partition_check;
     resultRelInfo->ri_PartitionRoot = partition_root;
     resultRelInfo->ri_PartitionInfo = NULL; /* may be set later */
+    resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
 }
 
 /*


@@ -947,6 +947,7 @@ ExecInitRoutingInfo(ModifyTableState *mtstate,
         partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
 
     partRelInfo->ri_PartitionInfo = partrouteinfo;
+    partRelInfo->ri_CopyMultiInsertBuffer = NULL;
 
     /*
      * Keep track of it in the PartitionTupleRouting->partitions array.


@@ -36,6 +36,7 @@
 #define HEAP_INSERT_SPECULATIVE 0x0010
 
 typedef struct BulkInsertStateData *BulkInsertState;
+struct TupleTableSlot;
 
 #define MaxLockTupleMode    LockTupleExclusive
 
@@ -143,8 +144,9 @@ extern void ReleaseBulkInsertStatePin(BulkInsertState bistate);
 
 extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid,
                         int options, BulkInsertState bistate);
-extern void heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
-                              CommandId cid, int options, BulkInsertState bistate);
+extern void heap_multi_insert(Relation relation, struct TupleTableSlot **slots,
+                              int ntuples, CommandId cid, int options,
+                              BulkInsertState bistate);
 extern TM_Result heap_delete(Relation relation, ItemPointer tid,
                              CommandId cid, Snapshot crosscheck, bool wait,
                              struct TM_FailureData *tmfd, bool changingPart);


@@ -350,6 +350,10 @@ typedef struct TableAmRoutine
                                          uint32 specToken,
                                          bool succeeded);
 
+    /* see table_multi_insert() for reference about parameters */
+    void        (*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots,
+                                 CommandId cid, int options, struct BulkInsertStateData *bistate);
+
     /* see table_delete() for reference about parameters */
     TM_Result   (*tuple_delete) (Relation rel,
                                  ItemPointer tid,
@@ -1077,6 +1081,28 @@ table_complete_speculative(Relation rel, TupleTableSlot *slot,
                                            succeeded);
 }
 
+/*
+ * Insert multiple tuple into a table.
+ *
+ * This is like table_insert(), but inserts multiple tuples in one
+ * operation. That's often faster than calling table_insert() in a loop,
+ * because e.g. the AM can reduce WAL logging and page locking overhead.
+ *
+ * Except for taking `nslots` tuples as input, as an array of TupleTableSlots
+ * in `slots`, the parameters for table_multi_insert() are the same as for
+ * table_insert().
+ *
+ * Note: this leaks memory into the current memory context. You can create a
+ * temporary context before calling this, if that's a problem.
+ */
+static inline void
+table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots,
+                   CommandId cid, int options, struct BulkInsertStateData *bistate)
+{
+    rel->rd_tableam->multi_insert(rel, slots, nslots,
+                                  cid, options, bistate);
+}
+
 /*
  * Delete a tuple.
  *

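As a usage illustration for the new table_multi_insert() helper added above (flush_batch and batchcxt are hypothetical names, not part of this commit), a caller might batch filled slots and flush them with one call, using a short-lived memory context since, as the new comment notes, the function may leak into the current one:

#include "postgres.h"
#include "access/tableam.h"
#include "executor/tuptable.h"
#include "utils/memutils.h"

/* Hypothetical helper: insert 'nslots' already-filled slots into 'rel' in one go. */
static void
flush_batch(Relation rel, TupleTableSlot **slots, int nslots,
            CommandId cid, int options, struct BulkInsertStateData *bistate,
            MemoryContext batchcxt)
{
    MemoryContext oldcontext;

    /* run the multi-insert in a short-lived context to contain any leakage */
    oldcontext = MemoryContextSwitchTo(batchcxt);
    table_multi_insert(rel, slots, nslots, cid, options, bistate);
    MemoryContextSwitchTo(oldcontext);

    MemoryContextReset(batchcxt);
}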

@@ -40,6 +40,7 @@ struct ExprState;
 struct ExprContext;
 struct RangeTblEntry;           /* avoid including parsenodes.h here */
 struct ExprEvalStep;            /* avoid including execExpr.h everywhere */
+struct CopyMultiInsertBuffer;
 
 
 /* ----------------
@@ -481,6 +482,9 @@ typedef struct ResultRelInfo
     /* Additional information specific to partition tuple routing */
     struct PartitionRoutingInfo *ri_PartitionInfo;
 
+    /* For use by copy.c when performing multi-inserts */
+    struct CopyMultiInsertBuffer *ri_CopyMultiInsertBuffer;
+
 } ResultRelInfo;
 
 /* ----------------


@@ -402,6 +402,9 @@ ConversionLocation
 ConvertRowtypeExpr
 CookedConstraint
 CopyDest
+CopyInsertMethod
+CopyMultiInsertBuffer
+CopyMultiInsertInfo
 CopyState
 CopyStateData
 CopyStmt