postgresql/src/backend/partitioning/partbounds.c


/*-------------------------------------------------------------------------
*
* partbounds.c
* Support routines for manipulating partition bounds
*
* Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/partitioning/partbounds.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "access/relation.h"
#include "access/table.h"
#include "access/tableam.h"
#include "catalog/partition.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_type.h"
#include "commands/tablecmds.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
#include "parser/parse_coerce.h"
#include "partitioning/partbounds.h"
#include "partitioning/partdesc.h"
#include "partitioning/partprune.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/hashutils.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
#include "utils/rel.h"
#include "utils/ruleutils.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
/*
* When qsort'ing partition bounds after reading from the catalog, each bound
* is represented with one of the following structs.
*/
/* One bound of a hash partition */
typedef struct PartitionHashBound
{
int modulus;
int remainder;
int index;
} PartitionHashBound;
/* One value coming from some (index'th) list partition */
typedef struct PartitionListValue
{
int index;
Datum value;
} PartitionListValue;
/* One bound of a range partition */
typedef struct PartitionRangeBound
{
int index;
Datum *datums; /* range bound datums */
PartitionRangeDatumKind *kind; /* the kind of each datum */
bool lower; /* this is the lower (vs upper) bound */
} PartitionRangeBound;
static int32 qsort_partition_hbound_cmp(const void *a, const void *b);
static int32 qsort_partition_list_value_cmp(const void *a, const void *b,
void *arg);
static int32 qsort_partition_rbound_cmp(const void *a, const void *b,
void *arg);
static PartitionBoundInfo create_hash_bounds(PartitionBoundSpec **boundspecs,
int nparts, PartitionKey key, int **mapping);
static PartitionBoundInfo create_list_bounds(PartitionBoundSpec **boundspecs,
int nparts, PartitionKey key, int **mapping);
static PartitionBoundInfo create_range_bounds(PartitionBoundSpec **boundspecs,
int nparts, PartitionKey key, int **mapping);
static PartitionRangeBound *make_one_partition_rbound(PartitionKey key, int index,
List *datums, bool lower);
static int32 partition_hbound_cmp(int modulus1, int remainder1, int modulus2,
int remainder2);
static int32 partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc,
Oid *partcollation, Datum *datums1,
PartitionRangeDatumKind *kind1, bool lower1,
PartitionRangeBound *b2);
static int partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc,
Oid *partcollation,
PartitionBoundInfo boundinfo,
PartitionRangeBound *probe, bool *is_equal);
static int get_partition_bound_num_indexes(PartitionBoundInfo b);
static Expr *make_partition_op_expr(PartitionKey key, int keynum,
uint16 strategy, Expr *arg1, Expr *arg2);
static Oid get_partition_operator(PartitionKey key, int col,
StrategyNumber strategy, bool *need_relabel);
static List *get_qual_for_hash(Relation parent, PartitionBoundSpec *spec);
static List *get_qual_for_list(Relation parent, PartitionBoundSpec *spec);
static List *get_qual_for_range(Relation parent, PartitionBoundSpec *spec,
bool for_default);
static void get_range_key_properties(PartitionKey key, int keynum,
PartitionRangeDatum *ldatum,
PartitionRangeDatum *udatum,
ListCell **partexprs_item,
Expr **keyCol,
Const **lower_val, Const **upper_val);
static List *get_range_nulltest(PartitionKey key);
/*
* get_qual_from_partbound
* Given a parser node for a partition bound, return the list of executable
* expressions that make up the partition constraint
*/
List *
get_qual_from_partbound(Relation rel, Relation parent,
PartitionBoundSpec *spec)
{
PartitionKey key = RelationGetPartitionKey(parent);
List *my_qual = NIL;
Assert(key != NULL);
switch (key->strategy)
{
case PARTITION_STRATEGY_HASH:
Assert(spec->strategy == PARTITION_STRATEGY_HASH);
my_qual = get_qual_for_hash(parent, spec);
break;
case PARTITION_STRATEGY_LIST:
Assert(spec->strategy == PARTITION_STRATEGY_LIST);
my_qual = get_qual_for_list(parent, spec);
break;
case PARTITION_STRATEGY_RANGE:
Assert(spec->strategy == PARTITION_STRATEGY_RANGE);
my_qual = get_qual_for_range(parent, spec, false);
break;
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) key->strategy);
}
return my_qual;
}
/*
* partition_bounds_create
* Build a PartitionBoundInfo struct from a list of PartitionBoundSpec
* nodes
*
* This function creates a PartitionBoundInfo and fills the values of its
* various members based on the input list. Importantly, 'datums' array will
* contain Datum representation of individual bounds (possibly after
* de-duplication as in case of range bounds), sorted in a canonical order
* defined by qsort_partition_* functions of respective partitioning methods.
* 'indexes' array will contain as many elements as there are bounds (specific
* exceptions to this rule are listed in the function body), which represent
* the 0-based canonical positions of partitions.
*
* Upon return from this function, *mapping is set to an array of
* list_length(boundspecs) elements, each of which maps the original index of
* a partition to its canonical index.
*
* Note: The objects returned by this function are wholly allocated in the
* current memory context.
*/
PartitionBoundInfo
partition_bounds_create(PartitionBoundSpec **boundspecs, int nparts,
PartitionKey key, int **mapping)
{
int i;
Assert(nparts > 0);
/*
* For each partitioning method, we first convert the partition bounds
* from their parser node representation to the internal representation,
* along with any additional preprocessing (such as de-duplicating range
* bounds). Resulting bound datums are then added to the 'datums' array
* in PartitionBoundInfo. For each datum added, an integer indicating the
* canonical partition index is added to the 'indexes' array.
*
* For each bound, we remember its partition's position (0-based) in the
* original list to later map it to the canonical index.
*/
/*
* Initialize the mapping array with invalid values; it is filled in by
* each sub-routine below, depending on the bound type.
*/
*mapping = (int *) palloc(sizeof(int) * nparts);
for (i = 0; i < nparts; i++)
(*mapping)[i] = -1;
switch (key->strategy)
{
case PARTITION_STRATEGY_HASH:
return create_hash_bounds(boundspecs, nparts, key, mapping);
case PARTITION_STRATEGY_LIST:
return create_list_bounds(boundspecs, nparts, key, mapping);
case PARTITION_STRATEGY_RANGE:
return create_range_bounds(boundspecs, nparts, key, mapping);
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) key->strategy);
break;
}
Assert(false);
return NULL; /* keep compiler quiet */
}
/*
* create_hash_bounds
* Create a PartitionBoundInfo for a hash partitioned table
*/
static PartitionBoundInfo
create_hash_bounds(PartitionBoundSpec **boundspecs, int nparts,
PartitionKey key, int **mapping)
{
PartitionBoundInfo boundinfo;
PartitionHashBound **hbounds = NULL;
int i;
int ndatums = 0;
int greatest_modulus;
boundinfo = (PartitionBoundInfoData *)
palloc0(sizeof(PartitionBoundInfoData));
boundinfo->strategy = key->strategy;
/* No special hash partitions. */
boundinfo->null_index = -1;
boundinfo->default_index = -1;
ndatums = nparts;
hbounds = (PartitionHashBound **)
palloc(nparts * sizeof(PartitionHashBound *));
/* Convert from node to the internal representation */
for (i = 0; i < nparts; i++)
{
PartitionBoundSpec *spec = boundspecs[i];
if (spec->strategy != PARTITION_STRATEGY_HASH)
elog(ERROR, "invalid strategy in partition bound spec");
hbounds[i] = (PartitionHashBound *) palloc(sizeof(PartitionHashBound));
hbounds[i]->modulus = spec->modulus;
hbounds[i]->remainder = spec->remainder;
hbounds[i]->index = i;
}
/* Sort all the bounds in ascending order */
qsort(hbounds, nparts, sizeof(PartitionHashBound *),
qsort_partition_hbound_cmp);
/* After sorting, moduli are now stored in ascending order. */
greatest_modulus = hbounds[ndatums - 1]->modulus;
boundinfo->ndatums = ndatums;
boundinfo->datums = (Datum **) palloc0(ndatums * sizeof(Datum *));
boundinfo->indexes = (int *) palloc(greatest_modulus * sizeof(int));
for (i = 0; i < greatest_modulus; i++)
boundinfo->indexes[i] = -1;
/*
* For hash partitioning, there are as many datums (modulus and remainder
* pairs) as there are partitions. Indexes are simply values ranging from
* 0 to (nparts - 1).
*/
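/*
* For example, suppose three partitions have the (modulus, remainder)
* pairs (2, 0), (4, 1) and (4, 3) (hypothetical but valid bounds). Then
* greatest_modulus is 4 and the loop below fills the indexes array as
* {0, 1, 0, 2}; position 2 points back at partition 0 because any hash
* value with remainder 2 modulo 4 also has remainder 0 modulo 2.
*/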
for (i = 0; i < nparts; i++)
{
int modulus = hbounds[i]->modulus;
int remainder = hbounds[i]->remainder;
boundinfo->datums[i] = (Datum *) palloc(2 * sizeof(Datum));
boundinfo->datums[i][0] = Int32GetDatum(modulus);
boundinfo->datums[i][1] = Int32GetDatum(remainder);
while (remainder < greatest_modulus)
{
/* overlap? */
Assert(boundinfo->indexes[remainder] == -1);
boundinfo->indexes[remainder] = i;
remainder += modulus;
}
(*mapping)[hbounds[i]->index] = i;
pfree(hbounds[i]);
}
pfree(hbounds);
return boundinfo;
}
/*
* create_list_bounds
* Create a PartitionBoundInfo for a list partitioned table
*/
static PartitionBoundInfo
create_list_bounds(PartitionBoundSpec **boundspecs, int nparts,
PartitionKey key, int **mapping)
{
PartitionBoundInfo boundinfo;
PartitionListValue **all_values = NULL;
ListCell *cell;
int i = 0;
int ndatums = 0;
int next_index = 0;
int default_index = -1;
int null_index = -1;
List *non_null_values = NIL;
boundinfo = (PartitionBoundInfoData *)
palloc0(sizeof(PartitionBoundInfoData));
boundinfo->strategy = key->strategy;
/* Will be set correctly below. */
boundinfo->null_index = -1;
boundinfo->default_index = -1;
/* Create a unified list of non-null values across all partitions. */
for (i = 0; i < nparts; i++)
{
PartitionBoundSpec *spec = boundspecs[i];
ListCell *c;
if (spec->strategy != PARTITION_STRATEGY_LIST)
elog(ERROR, "invalid strategy in partition bound spec");
/*
* Note the index of the partition bound spec for the default
* partition. There's no datum to add to the list of non-null datums
* for this partition.
*/
if (spec->is_default)
{
default_index = i;
continue;
}
foreach(c, spec->listdatums)
{
Const *val = castNode(Const, lfirst(c));
PartitionListValue *list_value = NULL;
if (!val->constisnull)
{
list_value = (PartitionListValue *)
palloc0(sizeof(PartitionListValue));
list_value->index = i;
list_value->value = val->constvalue;
}
else
{
/*
* Never put a null into the values array; save the index of
* the partition that stores nulls, instead.
*/
if (null_index != -1)
elog(ERROR, "found null more than once");
null_index = i;
}
if (list_value)
non_null_values = lappend(non_null_values, list_value);
}
}
ndatums = list_length(non_null_values);
/*
* Collect all list values in one array. Alongside the value, we also save
* the index of the partition the value comes from.
*/
all_values = (PartitionListValue **)
palloc(ndatums * sizeof(PartitionListValue *));
i = 0;
foreach(cell, non_null_values)
{
PartitionListValue *src = lfirst(cell);
all_values[i] = (PartitionListValue *)
palloc(sizeof(PartitionListValue));
all_values[i]->value = src->value;
all_values[i]->index = src->index;
i++;
}
qsort_arg(all_values, ndatums, sizeof(PartitionListValue *),
qsort_partition_list_value_cmp, (void *) key);
boundinfo->ndatums = ndatums;
boundinfo->datums = (Datum **) palloc0(ndatums * sizeof(Datum *));
boundinfo->indexes = (int *) palloc(ndatums * sizeof(int));
/*
* Copy values. Canonical indexes are values ranging from 0 to (nparts -
* 1) assigned to each partition such that all datums of a given partition
* receive the same value. The value for a given partition is the index of
* that partition's smallest datum in the all_values[] array.
*/
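/*
* As an illustration with hypothetical bounds: for p1 FOR VALUES IN (3, 7)
* and p2 FOR VALUES IN (1, 5), all_values[] sorts to 1, 3, 5, 7 and the
* loop below produces indexes[] = {0, 1, 0, 1}; p2 receives canonical
* index 0 because its smallest datum (1) sorts first, and p1 receives 1.
*/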
for (i = 0; i < ndatums; i++)
{
int orig_index = all_values[i]->index;
boundinfo->datums[i] = (Datum *) palloc(sizeof(Datum));
boundinfo->datums[i][0] = datumCopy(all_values[i]->value,
key->parttypbyval[0],
key->parttyplen[0]);
/* If the old index has no mapping, assign one */
if ((*mapping)[orig_index] == -1)
(*mapping)[orig_index] = next_index++;
boundinfo->indexes[i] = (*mapping)[orig_index];
}
/*
* Set the canonical value for null_index, if any.
*
* It is possible that the null-accepting partition has not been assigned
* an index yet, which can happen if it accepts only nulls and hence was
* not handled by the loop above, which only looked at non-null values.
*/
if (null_index != -1)
{
Assert(null_index >= 0);
if ((*mapping)[null_index] == -1)
(*mapping)[null_index] = next_index++;
boundinfo->null_index = (*mapping)[null_index];
}
/* Set the canonical value for default_index, if any. */
if (default_index != -1)
{
/*
* The default partition accepts any value not specified in the lists
* of other partitions, hence it should not have been assigned a mapped
* index while those for the non-null datums were assigned above.
*/
Assert(default_index >= 0);
Assert((*mapping)[default_index] == -1);
(*mapping)[default_index] = next_index++;
boundinfo->default_index = (*mapping)[default_index];
}
/* All partitions must now have been assigned canonical indexes. */
Assert(next_index == nparts);
return boundinfo;
}
/*
* create_range_bounds
* Create a PartitionBoundInfo for a range partitioned table
*/
static PartitionBoundInfo
create_range_bounds(PartitionBoundSpec **boundspecs, int nparts,
PartitionKey key, int **mapping)
{
PartitionBoundInfo boundinfo;
PartitionRangeBound **rbounds = NULL;
PartitionRangeBound **all_bounds,
*prev;
int i,
k;
int ndatums = 0;
int default_index = -1;
int next_index = 0;
boundinfo = (PartitionBoundInfoData *)
palloc0(sizeof(PartitionBoundInfoData));
boundinfo->strategy = key->strategy;
/* There is no special null-accepting range partition. */
boundinfo->null_index = -1;
/* Will be set correctly below. */
boundinfo->default_index = -1;
all_bounds = (PartitionRangeBound **)
palloc0(2 * nparts * sizeof(PartitionRangeBound *));
/* Create a unified list of range bounds across all the partitions. */
ndatums = 0;
for (i = 0; i < nparts; i++)
{
PartitionBoundSpec *spec = boundspecs[i];
PartitionRangeBound *lower,
*upper;
if (spec->strategy != PARTITION_STRATEGY_RANGE)
elog(ERROR, "invalid strategy in partition bound spec");
/*
* Note the index of the partition bound spec for the default
* partition. There's no datum to add to the all_bounds array for
* this partition.
*/
if (spec->is_default)
{
default_index = i;
continue;
}
lower = make_one_partition_rbound(key, i, spec->lowerdatums, true);
upper = make_one_partition_rbound(key, i, spec->upperdatums, false);
all_bounds[ndatums++] = lower;
all_bounds[ndatums++] = upper;
}
Assert(ndatums == nparts * 2 ||
(default_index != -1 && ndatums == (nparts - 1) * 2));
/* Sort all the bounds in ascending order */
qsort_arg(all_bounds, ndatums,
sizeof(PartitionRangeBound *),
qsort_partition_rbound_cmp,
(void *) key);
/* Save distinct bounds from all_bounds into rbounds. */
rbounds = (PartitionRangeBound **)
palloc(ndatums * sizeof(PartitionRangeBound *));
k = 0;
prev = NULL;
for (i = 0; i < ndatums; i++)
{
PartitionRangeBound *cur = all_bounds[i];
bool is_distinct = false;
int j;
/* Is the current bound distinct from the previous one? */
for (j = 0; j < key->partnatts; j++)
{
Datum cmpval;
if (prev == NULL || cur->kind[j] != prev->kind[j])
{
is_distinct = true;
break;
}
/*
* If the bounds are both MINVALUE or MAXVALUE, stop now and treat
* them as equal, since any values after this point must be
* ignored.
*/
if (cur->kind[j] != PARTITION_RANGE_DATUM_VALUE)
break;
cmpval = FunctionCall2Coll(&key->partsupfunc[j],
key->partcollation[j],
cur->datums[j],
prev->datums[j]);
if (DatumGetInt32(cmpval) != 0)
{
is_distinct = true;
break;
}
}
/*
* Only if the bound is distinct do we save it into the temporary rbounds
* array, which is later copied into boundinfo's datums array.
*/
if (is_distinct)
rbounds[k++] = all_bounds[i];
prev = cur;
}
/* Update ndatums to hold the count of distinct datums. */
ndatums = k;
/*
* Add datums to boundinfo. Canonical indexes are values ranging from 0
* to nparts - 1, assigned in that order to each partition's upper bound.
* For 'datums' elements that are lower bounds, there is -1 in the
* 'indexes' array to signify that no partition exists for the values less
* than such a bound and greater than or equal to the previous upper
* bound.
*/
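/*
* For instance, with hypothetical partitions p1 FOR VALUES FROM (1) TO
* (10) and p2 FROM (20) TO (30), 'datums' becomes {1, 10, 20, 30} and
* 'indexes' becomes {-1, 0, -1, 1, -1}: the -1 entries cover the values
* below 1, the gap between 10 and 20, and everything from 30 upwards.
* Had p2 instead started at 10, the duplicate bound would have been
* merged above, giving datums {1, 10, 20} and indexes {-1, 0, 1, -1}.
*/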
boundinfo->ndatums = ndatums;
boundinfo->datums = (Datum **) palloc0(ndatums * sizeof(Datum *));
boundinfo->kind = (PartitionRangeDatumKind **)
palloc(ndatums *
sizeof(PartitionRangeDatumKind *));
/*
* For range partitioning, an additional value of -1 is stored as the last
* element.
*/
boundinfo->indexes = (int *) palloc((ndatums + 1) * sizeof(int));
for (i = 0; i < ndatums; i++)
{
int j;
boundinfo->datums[i] = (Datum *) palloc(key->partnatts *
sizeof(Datum));
boundinfo->kind[i] = (PartitionRangeDatumKind *)
palloc(key->partnatts *
sizeof(PartitionRangeDatumKind));
for (j = 0; j < key->partnatts; j++)
{
if (rbounds[i]->kind[j] == PARTITION_RANGE_DATUM_VALUE)
boundinfo->datums[i][j] =
datumCopy(rbounds[i]->datums[j],
key->parttypbyval[j],
key->parttyplen[j]);
boundinfo->kind[i][j] = rbounds[i]->kind[j];
}
/*
* There is no mapping for invalid indexes.
*
* Any lower bounds in the rbounds array have invalid indexes
* assigned, because the values between the previous bound (if there
* is one) and this (lower) bound are not part of the range of any
* existing partition.
*/
if (rbounds[i]->lower)
boundinfo->indexes[i] = -1;
else
{
int orig_index = rbounds[i]->index;
/* If the old index has no mapping, assign one */
if ((*mapping)[orig_index] == -1)
(*mapping)[orig_index] = next_index++;
boundinfo->indexes[i] = (*mapping)[orig_index];
}
}
/* Set the canonical value for default_index, if any. */
if (default_index != -1)
{
Assert(default_index >= 0 && (*mapping)[default_index] == -1);
(*mapping)[default_index] = next_index++;
boundinfo->default_index = (*mapping)[default_index];
}
/* The extra -1 element. */
Assert(i == ndatums);
boundinfo->indexes[i] = -1;
/* All partitions must now have been assigned canonical indexes. */
Assert(next_index == nparts);
return boundinfo;
}
/*
* Are two partition bound collections logically equal?
*
* Used in the keep logic of relcache.c (ie, in RelationClearRelation()).
* This is also useful when b1 and b2 are bound collections of two separate
* relations, respectively, because PartitionBoundInfo is a canonical
* representation of partition bounds.
*/
bool
partition_bounds_equal(int partnatts, int16 *parttyplen, bool *parttypbyval,
PartitionBoundInfo b1, PartitionBoundInfo b2)
{
int i;
if (b1->strategy != b2->strategy)
return false;
if (b1->ndatums != b2->ndatums)
return false;
if (b1->null_index != b2->null_index)
return false;
if (b1->default_index != b2->default_index)
return false;
if (b1->strategy == PARTITION_STRATEGY_HASH)
{
int greatest_modulus = get_hash_partition_greatest_modulus(b1);
/*
* If two hash partitioned tables have different greatest moduli,
* their partition schemes don't match.
*/
if (greatest_modulus != get_hash_partition_greatest_modulus(b2))
return false;
/*
* We arrange the partitions in ascending order of their moduli and
* remainders, and every modulus is a factor of the next larger
* modulus. Therefore we can safely store the index of a given
* partition in the indexes array at that partition's remainder, and
* the entries at positions (remainder + N * modulus) in the indexes
* array are all the same for any partition's (modulus, remainder)
* specification. Thus the datums arrays of the two bounds are the
* same if and only if their indexes arrays are the same, so it
* suffices to compare the indexes arrays.
*/
for (i = 0; i < greatest_modulus; i++)
if (b1->indexes[i] != b2->indexes[i])
return false;
#ifdef USE_ASSERT_CHECKING
/*
* Nonetheless make sure that the bounds are indeed the same when the
* indexes match. A hash partition bound stores the modulus and remainder
* at positions b1->datums[i][0] and b1->datums[i][1], respectively.
*/
for (i = 0; i < b1->ndatums; i++)
Assert((b1->datums[i][0] == b2->datums[i][0] &&
b1->datums[i][1] == b2->datums[i][1]));
#endif
}
else
{
for (i = 0; i < b1->ndatums; i++)
{
int j;
for (j = 0; j < partnatts; j++)
{
/* For range partitions, the bounds might not be finite. */
if (b1->kind != NULL)
{
/* The different kinds of bound all differ from each other */
if (b1->kind[i][j] != b2->kind[i][j])
return false;
/*
* Non-finite bounds are equal without further
* examination.
*/
if (b1->kind[i][j] != PARTITION_RANGE_DATUM_VALUE)
continue;
}
/*
* Compare the actual values. Note that it would be both
* incorrect and unsafe to invoke the comparison operator
* derived from the partitioning specification here. It would
* be incorrect because we want the relcache entry to be
* updated for ANY change to the partition bounds, not just
* those that the partitioning operator thinks are
* significant. It would be unsafe because we might reach
* this code in the context of an aborted transaction, and an
* arbitrary partitioning operator might not be safe in that
* context. datumIsEqual() should be simple enough to be
* safe.
*/
if (!datumIsEqual(b1->datums[i][j], b2->datums[i][j],
parttypbyval[j], parttyplen[j]))
return false;
}
if (b1->indexes[i] != b2->indexes[i])
return false;
}
/* There are ndatums+1 indexes in case of range partitions */
if (b1->strategy == PARTITION_STRATEGY_RANGE &&
b1->indexes[i] != b2->indexes[i])
return false;
}
return true;
}
/*
* Return a copy of the given PartitionBoundInfo structure. The data types
* of the bounds are described by the given partition key specification.
*/
PartitionBoundInfo
partition_bounds_copy(PartitionBoundInfo src,
PartitionKey key)
{
PartitionBoundInfo dest;
int i;
int ndatums;
int partnatts;
int num_indexes;
dest = (PartitionBoundInfo) palloc(sizeof(PartitionBoundInfoData));
dest->strategy = src->strategy;
ndatums = dest->ndatums = src->ndatums;
partnatts = key->partnatts;
num_indexes = get_partition_bound_num_indexes(src);
/* List partitioned tables have only a single partition key. */
Assert(key->strategy != PARTITION_STRATEGY_LIST || partnatts == 1);
dest->datums = (Datum **) palloc(sizeof(Datum *) * ndatums);
if (src->kind != NULL)
{
dest->kind = (PartitionRangeDatumKind **) palloc(ndatums *
sizeof(PartitionRangeDatumKind *));
for (i = 0; i < ndatums; i++)
{
dest->kind[i] = (PartitionRangeDatumKind *) palloc(partnatts *
sizeof(PartitionRangeDatumKind));
memcpy(dest->kind[i], src->kind[i],
sizeof(PartitionRangeDatumKind) * key->partnatts);
}
}
else
dest->kind = NULL;
for (i = 0; i < ndatums; i++)
{
int j;
/*
* For a bound corresponding to a hash partition, the datums array has two
* elements: the modulus and the remainder.
*/
bool hash_part = (key->strategy == PARTITION_STRATEGY_HASH);
int natts = hash_part ? 2 : partnatts;
dest->datums[i] = (Datum *) palloc(sizeof(Datum) * natts);
for (j = 0; j < natts; j++)
{
bool byval;
int typlen;
if (hash_part)
{
typlen = sizeof(int32); /* Always int4 */
byval = true; /* int4 is pass-by-value */
}
else
{
byval = key->parttypbyval[j];
typlen = key->parttyplen[j];
}
if (dest->kind == NULL ||
dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE)
dest->datums[i][j] = datumCopy(src->datums[i][j],
byval, typlen);
}
}
dest->indexes = (int *) palloc(sizeof(int) * num_indexes);
memcpy(dest->indexes, src->indexes, sizeof(int) * num_indexes);
dest->null_index = src->null_index;
dest->default_index = src->default_index;
return dest;
}
/*
* partitions_are_ordered
* Determine whether the partitions described by 'boundinfo' are ordered,
* that is, partitions appearing earlier in the PartitionDesc sequence
* contain partition keys strictly less than those appearing later.
* Also, if NULL values are possible, they must come in the last
* partition defined in the PartitionDesc.
*
* If out of order, or there is insufficient info to know the order,
* then we return false.
*/
bool
partitions_are_ordered(PartitionBoundInfo boundinfo, int nparts)
{
Assert(boundinfo != NULL);
switch (boundinfo->strategy)
{
case PARTITION_STRATEGY_RANGE:
/*
* RANGE-type partitioning guarantees that the partitions can be
* scanned in the order that they're defined in the PartitionDesc
* to provide sequential, non-overlapping ranges of tuples.
* However, if a DEFAULT partition exists then it doesn't work, as
* that could contain tuples from either below or above the
* defined range, or tuples belonging to gaps between partitions.
*/
if (!partition_bound_has_default(boundinfo))
return true;
break;
case PARTITION_STRATEGY_LIST:
/*
* LIST partitioning can also guarantee ordering, but only if the
* partitions don't accept interleaved values. We could likely
* check for this by looping over the PartitionBound's indexes
* array to check that the indexes are in order. For now, let's
* just keep it simple and just accept LIST partitioning when
* there's no DEFAULT partition, exactly one value per partition,
* and optionally a NULL partition that does not accept any other
* values. Such a NULL partition will come last in the
* PartitionDesc, and the other partitions will be properly
* ordered. This is a cheap test to make as it does not require
* any per-partition processing. Maybe we'd like to handle more
* complex cases in the future.
*/
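/*
* For example (hypothetical bounds): partitions FOR VALUES IN (1),
* IN (2) and IN (NULL) satisfy the check below (ndatums 2, plus 1 for
* the null partition, equals nparts 3), whereas FOR VALUES IN (1, 2)
* and IN (3, 4) do not (ndatums 4 != nparts 2), even though they happen
* to be ordered; the check is deliberately conservative.
*/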
if (partition_bound_has_default(boundinfo))
return false;
if (boundinfo->ndatums + partition_bound_accepts_nulls(boundinfo)
== nparts)
return true;
break;
default:
/* HASH, or some other strategy */
break;
}
return false;
}
/*
* check_new_partition_bound
*
* Checks if the new partition's bound overlaps any of the existing partitions
* of parent. Also performs additional checks as necessary per strategy.
*/
void
check_new_partition_bound(char *relname, Relation parent,
PartitionBoundSpec *spec)
{
PartitionKey key = RelationGetPartitionKey(parent);
PartitionDesc partdesc = RelationGetPartitionDesc(parent);
PartitionBoundInfo boundinfo = partdesc->boundinfo;
ParseState *pstate = make_parsestate(NULL);
int with = -1;
bool overlap = false;
if (spec->is_default)
{
/*
* The default partition bound never conflicts with any other
* partition's; if that's what we're attaching, the only possible
* problem is that one already exists, so check for that and we're
* done.
*/
if (boundinfo == NULL || !partition_bound_has_default(boundinfo))
return;
/* Default partition already exists, error out. */
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("partition \"%s\" conflicts with existing default partition \"%s\"",
relname, get_rel_name(partdesc->oids[boundinfo->default_index])),
parser_errposition(pstate, spec->location)));
}
switch (key->strategy)
{
case PARTITION_STRATEGY_HASH:
{
Assert(spec->strategy == PARTITION_STRATEGY_HASH);
Assert(spec->remainder >= 0 && spec->remainder < spec->modulus);
if (partdesc->nparts > 0)
{
Datum **datums = boundinfo->datums;
int ndatums = boundinfo->ndatums;
int greatest_modulus;
int remainder;
int offset;
bool valid_modulus = true;
int prev_modulus, /* Previous largest modulus */
next_modulus; /* Next largest modulus */
/*
* Check rule that every modulus must be a factor of the
* next larger modulus. For example, if you have a bunch
* of partitions that all have modulus 5, you can add a
* new partition with modulus 10 or a new partition with
* modulus 15, but you cannot add both a partition with
* modulus 10 and a partition with modulus 15, because 10
* is not a factor of 15.
*
* Get the greatest (modulus, remainder) pair contained in
* boundinfo->datums that is less than or equal to the
* (spec->modulus, spec->remainder) pair.
*/
offset = partition_hash_bsearch(boundinfo,
spec->modulus,
spec->remainder);
if (offset < 0)
{
next_modulus = DatumGetInt32(datums[0][0]);
valid_modulus = (next_modulus % spec->modulus) == 0;
}
else
{
prev_modulus = DatumGetInt32(datums[offset][0]);
valid_modulus = (spec->modulus % prev_modulus) == 0;
if (valid_modulus && (offset + 1) < ndatums)
{
next_modulus = DatumGetInt32(datums[offset + 1][0]);
valid_modulus = (next_modulus % spec->modulus) == 0;
}
}
if (!valid_modulus)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("every hash partition modulus must be a factor of the next larger modulus")));
greatest_modulus = get_hash_partition_greatest_modulus(boundinfo);
remainder = spec->remainder;
/*
* Normally, the lowest remainder that could conflict with
* the new partition is equal to the remainder specified
* for the new partition, but when the new partition has a
* modulus higher than any used so far, we need to adjust.
*/
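/*
* For example (hypothetical bounds), if the existing partitions have a
* greatest modulus of 4 and the new partition is (modulus 8, remainder
* 7), the probe below starts at remainder 7 % 4 = 3, since any key with
* remainder 7 modulo 8 also has remainder 3 modulo 4.
*/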
if (remainder >= greatest_modulus)
remainder = remainder % greatest_modulus;
/* Check every potentially-conflicting remainder. */
do
{
if (boundinfo->indexes[remainder] != -1)
{
overlap = true;
with = boundinfo->indexes[remainder];
break;
}
remainder += spec->modulus;
} while (remainder < greatest_modulus);
}
break;
}
case PARTITION_STRATEGY_LIST:
{
Assert(spec->strategy == PARTITION_STRATEGY_LIST);
if (partdesc->nparts > 0)
{
ListCell *cell;
Assert(boundinfo &&
boundinfo->strategy == PARTITION_STRATEGY_LIST &&
(boundinfo->ndatums > 0 ||
partition_bound_accepts_nulls(boundinfo) ||
partition_bound_has_default(boundinfo)));
foreach(cell, spec->listdatums)
{
Const *val = castNode(Const, lfirst(cell));
if (!val->constisnull)
{
int offset;
bool equal;
offset = partition_list_bsearch(&key->partsupfunc[0],
key->partcollation,
boundinfo,
val->constvalue,
&equal);
if (offset >= 0 && equal)
{
overlap = true;
with = boundinfo->indexes[offset];
break;
}
}
else if (partition_bound_accepts_nulls(boundinfo))
{
overlap = true;
with = boundinfo->null_index;
break;
}
}
}
break;
}
case PARTITION_STRATEGY_RANGE:
{
PartitionRangeBound *lower,
*upper;
Assert(spec->strategy == PARTITION_STRATEGY_RANGE);
lower = make_one_partition_rbound(key, -1, spec->lowerdatums, true);
upper = make_one_partition_rbound(key, -1, spec->upperdatums, false);
/*
* First check if the resulting range would be empty with
* specified lower and upper bounds
*/
if (partition_rbound_cmp(key->partnatts, key->partsupfunc,
key->partcollation, lower->datums,
lower->kind, true, upper) >= 0)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("empty range bound specified for partition \"%s\"",
relname),
errdetail("Specified lower bound %s is greater than or equal to upper bound %s.",
get_range_partbound_string(spec->lowerdatums),
get_range_partbound_string(spec->upperdatums)),
parser_errposition(pstate, spec->location)));
}
if (partdesc->nparts > 0)
{
int offset;
bool equal;
Assert(boundinfo &&
boundinfo->strategy == PARTITION_STRATEGY_RANGE &&
(boundinfo->ndatums > 0 ||
partition_bound_has_default(boundinfo)));
/*
* Test whether the new lower bound (which is treated
* inclusively as part of the new partition) lies inside
* an existing partition, or in a gap.
*
* If it's inside an existing partition, the bound at
* offset + 1 will be the upper bound of that partition,
* and its index will be >= 0.
*
* If it's in a gap, the bound at offset + 1 will be the
* lower bound of the next partition, and its index will
* be -1. This is also true if there is no next partition,
* since the index array is initialised with an extra -1
* at the end.
*/
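/*
* For example, with hypothetical existing partitions FROM (1) TO (10)
* and FROM (20) TO (30), probing with a new lower bound of 15 lands on
* the bound 10; the 'indexes' entry following it is -1 (the gap between
* 10 and 20), so the code below only has to verify that the new upper
* bound does not extend beyond 20, the next partition's lower bound.
*/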
offset = partition_range_bsearch(key->partnatts,
key->partsupfunc,
key->partcollation,
boundinfo, lower,
&equal);
if (boundinfo->indexes[offset + 1] < 0)
{
/*
* Check that the new partition will fit in the gap.
* For it to fit, the new upper bound must be less
* than or equal to the lower bound of the next
* partition, if there is one.
*/
if (offset + 1 < boundinfo->ndatums)
{
int32 cmpval;
Datum *datums;
PartitionRangeDatumKind *kind;
bool is_lower;
datums = boundinfo->datums[offset + 1];
kind = boundinfo->kind[offset + 1];
is_lower = (boundinfo->indexes[offset + 1] == -1);
cmpval = partition_rbound_cmp(key->partnatts,
key->partsupfunc,
key->partcollation,
datums, kind,
is_lower, upper);
if (cmpval < 0)
{
/*
* The new partition overlaps with the
* existing partition between offset + 1 and
* offset + 2.
*/
overlap = true;
with = boundinfo->indexes[offset + 2];
}
}
}
else
{
/*
* The new partition overlaps with the existing
* partition between offset and offset + 1.
*/
overlap = true;
with = boundinfo->indexes[offset + 1];
}
}
break;
}
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) key->strategy);
}
if (overlap)
{
Assert(with >= 0);
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("partition \"%s\" would overlap partition \"%s\"",
relname, get_rel_name(partdesc->oids[with])),
parser_errposition(pstate, spec->location)));
}
}
/*
* check_default_partition_contents
*
* This function checks if there exists a row in the default partition that
* would properly belong to the new partition being added. If it finds one,
* it throws an error.
*/
void
check_default_partition_contents(Relation parent, Relation default_rel,
PartitionBoundSpec *new_spec)
{
List *new_part_constraints;
List *def_part_constraints;
List *all_parts;
ListCell *lc;
new_part_constraints = (new_spec->strategy == PARTITION_STRATEGY_LIST)
? get_qual_for_list(parent, new_spec)
: get_qual_for_range(parent, new_spec, false);
def_part_constraints =
get_proposed_default_constraint(new_part_constraints);
/*
* Map the Vars in the constraint expression from parent's attnos to
* default_rel's.
*/
def_part_constraints =
map_partition_varattnos(def_part_constraints, 1, default_rel,
parent, NULL);
/*
* If the existing constraints on the default partition imply that it will
* not contain any row that would belong to the new partition, we can
* avoid scanning the default partition.
*/
if (PartConstraintImpliedByRelConstraint(default_rel, def_part_constraints))
{
ereport(DEBUG1,
(errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints",
RelationGetRelationName(default_rel))));
return;
}
/*
* Scan the default partition and its subpartitions, and check for rows
* that do not satisfy the revised partition constraints.
*/
if (default_rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
all_parts = find_all_inheritors(RelationGetRelid(default_rel),
AccessExclusiveLock, NULL);
else
all_parts = list_make1_oid(RelationGetRelid(default_rel));
foreach(lc, all_parts)
{
Oid part_relid = lfirst_oid(lc);
Relation part_rel;
Expr *partition_constraint;
EState *estate;
ExprState *partqualstate = NULL;
Snapshot snapshot;
ExprContext *econtext;
TableScanDesc scan;
MemoryContext oldCxt;
TupleTableSlot *tupslot;
/* Lock already taken above. */
if (part_relid != RelationGetRelid(default_rel))
{
part_rel = table_open(part_relid, NoLock);
/*
* Map the Vars in the constraint expression from default_rel's attnos to
* the sub-partition's.
*/
partition_constraint = make_ands_explicit(def_part_constraints);
partition_constraint = (Expr *)
map_partition_varattnos((List *) partition_constraint, 1,
part_rel, default_rel, NULL);
/*
* If the partition constraints on the default partition's child imply
* that it will not contain any row that would belong to the new
* partition, we can avoid scanning the child table.
*/
if (PartConstraintImpliedByRelConstraint(part_rel,
def_part_constraints))
{
ereport(DEBUG1,
(errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints",
RelationGetRelationName(part_rel))));
table_close(part_rel, NoLock);
continue;
}
}
else
{
part_rel = default_rel;
partition_constraint = make_ands_explicit(def_part_constraints);
}
/*
* Only RELKIND_RELATION relations (i.e. leaf partitions) need to be
* scanned.
*/
if (part_rel->rd_rel->relkind != RELKIND_RELATION)
{
if (part_rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
ereport(WARNING,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("skipped scanning foreign table \"%s\" which is a partition of default partition \"%s\"",
RelationGetRelationName(part_rel),
RelationGetRelationName(default_rel))));
if (RelationGetRelid(default_rel) != RelationGetRelid(part_rel))
table_close(part_rel, NoLock);
continue;
}
estate = CreateExecutorState();
/* Build expression execution states for partition check quals */
partqualstate = ExecPrepareExpr(partition_constraint, estate);
econtext = GetPerTupleExprContext(estate);
snapshot = RegisterSnapshot(GetLatestSnapshot());
tupslot = table_slot_create(part_rel, &estate->es_tupleTable);
scan = table_beginscan(part_rel, snapshot, 0, NULL);
/*
* Switch to per-tuple memory context and reset it for each tuple
* produced, so we don't leak memory.
*/
oldCxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
while (table_scan_getnextslot(scan, ForwardScanDirection, tupslot))
{
econtext->ecxt_scantuple = tupslot;
if (!ExecCheck(partqualstate, econtext))
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("updated partition constraint for default partition \"%s\" would be violated by some row",
RelationGetRelationName(default_rel))));
ResetExprContext(econtext);
CHECK_FOR_INTERRUPTS();
}
MemoryContextSwitchTo(oldCxt);
table_endscan(scan);
UnregisterSnapshot(snapshot);
ExecDropSingleTupleTableSlot(tupslot);
FreeExecutorState(estate);
if (RelationGetRelid(default_rel) != RelationGetRelid(part_rel))
table_close(part_rel, NoLock); /* keep the lock until commit */
}
}
/*
* get_hash_partition_greatest_modulus
*
* Returns the greatest modulus of the hash partition bound. The greatest
* modulus will be at the end of the datums array because hash partitions are
* arranged in the ascending order of their moduli and remainders.
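 *
 * For illustration (values chosen arbitrarily): if the bounds are the
 * (modulus, remainder) pairs (2, 0), (4, 1) and (4, 3), they are stored in
 * that order, and the greatest modulus is the 4 found in the last datums
 * entry.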
*/
int
get_hash_partition_greatest_modulus(PartitionBoundInfo bound)
{
Assert(bound && bound->strategy == PARTITION_STRATEGY_HASH);
Assert(bound->datums && bound->ndatums > 0);
Assert(DatumGetInt32(bound->datums[bound->ndatums - 1][0]) > 0);
return DatumGetInt32(bound->datums[bound->ndatums - 1][0]);
}
/*
* make_one_partition_rbound
*
* Return a PartitionRangeBound given a list of PartitionRangeDatum elements
* and a flag telling whether the bound is lower or not. Made into a function
* because there are multiple sites that want to use this facility.
*/
static PartitionRangeBound *
make_one_partition_rbound(PartitionKey key, int index, List *datums, bool lower)
{
PartitionRangeBound *bound;
ListCell *lc;
int i;
Assert(datums != NIL);
bound = (PartitionRangeBound *) palloc0(sizeof(PartitionRangeBound));
bound->index = index;
bound->datums = (Datum *) palloc0(key->partnatts * sizeof(Datum));
bound->kind = (PartitionRangeDatumKind *) palloc0(key->partnatts *
sizeof(PartitionRangeDatumKind));
bound->lower = lower;
i = 0;
foreach(lc, datums)
{
PartitionRangeDatum *datum = castNode(PartitionRangeDatum, lfirst(lc));
/* What's contained in this range datum? */
bound->kind[i] = datum->kind;
if (datum->kind == PARTITION_RANGE_DATUM_VALUE)
{
Const *val = castNode(Const, datum->value);
if (val->constisnull)
elog(ERROR, "invalid range bound datum");
bound->datums[i] = val->constvalue;
}
i++;
}
return bound;
}
/*
* partition_rbound_cmp
*
 * Return whether the first of two range bounds (specified by datums1,
 * kind1, and lower1) is <, =, or > the bound specified in *b2.
*
* partnatts, partsupfunc and partcollation give the number of attributes in the
 * bounds to be compared, the comparison functions to be used and the
 * collations of the attributes, respectively.
*
* Note that if the values of the two range bounds compare equal, then we take
* into account whether they are upper or lower bounds, and an upper bound is
* considered to be smaller than a lower bound. This is important to the way
* that RelationBuildPartitionDesc() builds the PartitionBoundInfoData
* structure, which only stores the upper bound of a common boundary between
* two contiguous partitions.
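 *
 * For example (a hypothetical one-column key): given partitions defined
 * FOR VALUES FROM (1) TO (10) and FOR VALUES FROM (10) TO (20), the first
 * partition's upper bound (10) compares as smaller than the second
 * partition's lower bound (10), even though the bound datums are equal.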
*/
static int32
partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc,
Oid *partcollation,
Datum *datums1, PartitionRangeDatumKind *kind1,
bool lower1, PartitionRangeBound *b2)
{
int32 cmpval = 0; /* placate compiler */
int i;
Datum *datums2 = b2->datums;
PartitionRangeDatumKind *kind2 = b2->kind;
bool lower2 = b2->lower;
for (i = 0; i < partnatts; i++)
{
/*
* First, handle cases where the column is unbounded, which should not
* invoke the comparison procedure, and should not consider any later
* columns. Note that the PartitionRangeDatumKind enum elements
* compare the same way as the values they represent.
*/
if (kind1[i] < kind2[i])
return -1;
else if (kind1[i] > kind2[i])
return 1;
else if (kind1[i] != PARTITION_RANGE_DATUM_VALUE)
/*
* The column bounds are both MINVALUE or both MAXVALUE. No later
* columns should be considered, but we still need to compare
* whether they are upper or lower bounds.
*/
break;
cmpval = DatumGetInt32(FunctionCall2Coll(&partsupfunc[i],
partcollation[i],
datums1[i],
datums2[i]));
if (cmpval != 0)
break;
}
/*
* If the comparison is anything other than equal, we're done. If they
* compare equal though, we still have to consider whether the boundaries
 * are inclusive or exclusive.  The exclusive one is considered the
 * smaller of the two.
*/
if (cmpval == 0 && lower1 != lower2)
cmpval = lower1 ? 1 : -1;
return cmpval;
}
/*
* partition_rbound_datum_cmp
*
 * Return whether the range bound (specified by rb_datums and rb_kind)
 * is <, =, or > the partition key of the tuple (tuple_datums).
*
 * n_tuple_datums, partsupfunc and partcollation give the number of
 * attributes in the bounds to be compared, the comparison functions to be
 * used and the collations of the attributes, respectively.
*
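 * As a sketch (hypothetical two-column key): for the bound (10, MINVALUE)
 * and the tuple (10, 5), the first column compares equal and the second
 * bound datum is MINVALUE, so the result is -1, i.e. the bound sorts
 * before the tuple.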
*/
int32
partition_rbound_datum_cmp(FmgrInfo *partsupfunc, Oid *partcollation,
Datum *rb_datums, PartitionRangeDatumKind *rb_kind,
Datum *tuple_datums, int n_tuple_datums)
{
int i;
int32 cmpval = -1;
for (i = 0; i < n_tuple_datums; i++)
{
if (rb_kind[i] == PARTITION_RANGE_DATUM_MINVALUE)
return -1;
else if (rb_kind[i] == PARTITION_RANGE_DATUM_MAXVALUE)
return 1;
cmpval = DatumGetInt32(FunctionCall2Coll(&partsupfunc[i],
partcollation[i],
rb_datums[i],
tuple_datums[i]));
if (cmpval != 0)
break;
}
return cmpval;
}
/*
* partition_hbound_cmp
*
* Compares modulus first, then remainder if modulus is equal.
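 *
 * For example, (2, 1) sorts before (4, 0) because 2 < 4, and (4, 1) sorts
 * before (4, 3) because the moduli are equal and 1 < 3.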
*/
static int32
partition_hbound_cmp(int modulus1, int remainder1, int modulus2, int remainder2)
{
if (modulus1 < modulus2)
return -1;
if (modulus1 > modulus2)
return 1;
if (modulus1 == modulus2 && remainder1 != remainder2)
return (remainder1 > remainder2) ? 1 : -1;
return 0;
}
/*
* partition_list_bsearch
 * Returns the index of the greatest bound datum that is less than or equal
* to the given value or -1 if all of the bound datums are greater
*
* *is_equal is set to true if the bound datum at the returned index is equal
* to the input value.
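 *
 * For instance (hypothetical bounds): with bound datums 10, 20 and 30, a
 * search for 25 returns index 1 (datum 20) with *is_equal set to false, a
 * search for 20 returns index 1 with *is_equal set to true, and a search
 * for 5 returns -1.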
*/
int
partition_list_bsearch(FmgrInfo *partsupfunc, Oid *partcollation,
PartitionBoundInfo boundinfo,
Datum value, bool *is_equal)
{
int lo,
hi,
mid;
lo = -1;
hi = boundinfo->ndatums - 1;
while (lo < hi)
{
int32 cmpval;
mid = (lo + hi + 1) / 2;
cmpval = DatumGetInt32(FunctionCall2Coll(&partsupfunc[0],
partcollation[0],
boundinfo->datums[mid][0],
value));
if (cmpval <= 0)
{
lo = mid;
*is_equal = (cmpval == 0);
if (*is_equal)
break;
}
else
hi = mid - 1;
}
return lo;
}
/*
* partition_range_bsearch
* Returns the index of the greatest range bound that is less than or
* equal to the given range bound or -1 if all of the range bounds are
* greater
*
* *is_equal is set to true if the range bound at the returned index is equal
* to the input range bound
*/
static int
partition_range_bsearch(int partnatts, FmgrInfo *partsupfunc,
Oid *partcollation,
PartitionBoundInfo boundinfo,
PartitionRangeBound *probe, bool *is_equal)
{
int lo,
hi,
mid;
lo = -1;
hi = boundinfo->ndatums - 1;
while (lo < hi)
{
int32 cmpval;
mid = (lo + hi + 1) / 2;
cmpval = partition_rbound_cmp(partnatts, partsupfunc,
partcollation,
boundinfo->datums[mid],
boundinfo->kind[mid],
(boundinfo->indexes[mid] == -1),
probe);
if (cmpval <= 0)
{
lo = mid;
*is_equal = (cmpval == 0);
if (*is_equal)
break;
}
else
hi = mid - 1;
}
return lo;
}
/*
 * partition_range_datum_bsearch
* Returns the index of the greatest range bound that is less than or
* equal to the given tuple or -1 if all of the range bounds are greater
*
* *is_equal is set to true if the range bound at the returned index is equal
* to the input tuple.
*/
int
partition_range_datum_bsearch(FmgrInfo *partsupfunc, Oid *partcollation,
PartitionBoundInfo boundinfo,
int nvalues, Datum *values, bool *is_equal)
{
int lo,
hi,
mid;
lo = -1;
hi = boundinfo->ndatums - 1;
while (lo < hi)
{
int32 cmpval;
mid = (lo + hi + 1) / 2;
cmpval = partition_rbound_datum_cmp(partsupfunc,
partcollation,
boundinfo->datums[mid],
boundinfo->kind[mid],
values,
nvalues);
if (cmpval <= 0)
{
lo = mid;
*is_equal = (cmpval == 0);
if (*is_equal)
break;
}
else
hi = mid - 1;
}
return lo;
}
/*
* partition_hash_bsearch
* Returns the index of the greatest (modulus, remainder) pair that is
* less than or equal to the given (modulus, remainder) pair or -1 if
* all of them are greater
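 *
 * For instance (hypothetical bounds): with (modulus, remainder) pairs
 * (2, 0), (4, 1) and (4, 3), a probe of (4, 2) returns index 1, while a
 * probe of (2, 0) returns index 0.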
*/
int
partition_hash_bsearch(PartitionBoundInfo boundinfo,
int modulus, int remainder)
{
int lo,
hi,
mid;
lo = -1;
hi = boundinfo->ndatums - 1;
while (lo < hi)
{
int32 cmpval,
bound_modulus,
bound_remainder;
mid = (lo + hi + 1) / 2;
bound_modulus = DatumGetInt32(boundinfo->datums[mid][0]);
bound_remainder = DatumGetInt32(boundinfo->datums[mid][1]);
cmpval = partition_hbound_cmp(bound_modulus, bound_remainder,
modulus, remainder);
if (cmpval <= 0)
{
lo = mid;
if (cmpval == 0)
break;
}
else
hi = mid - 1;
}
return lo;
}
/*
* qsort_partition_hbound_cmp
*
* Hash bounds are sorted by modulus, then by remainder.
*/
static int32
qsort_partition_hbound_cmp(const void *a, const void *b)
{
PartitionHashBound *h1 = (*(PartitionHashBound *const *) a);
PartitionHashBound *h2 = (*(PartitionHashBound *const *) b);
return partition_hbound_cmp(h1->modulus, h1->remainder,
h2->modulus, h2->remainder);
}
/*
* qsort_partition_list_value_cmp
*
* Compare two list partition bound datums.
*/
static int32
qsort_partition_list_value_cmp(const void *a, const void *b, void *arg)
{
Datum val1 = (*(PartitionListValue *const *) a)->value,
val2 = (*(PartitionListValue *const *) b)->value;
PartitionKey key = (PartitionKey) arg;
return DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
key->partcollation[0],
val1, val2));
}
/*
* qsort_partition_rbound_cmp
*
* Used when sorting range bounds across all range partitions.
*/
static int32
qsort_partition_rbound_cmp(const void *a, const void *b, void *arg)
{
PartitionRangeBound *b1 = (*(PartitionRangeBound *const *) a);
PartitionRangeBound *b2 = (*(PartitionRangeBound *const *) b);
PartitionKey key = (PartitionKey) arg;
return partition_rbound_cmp(key->partnatts, key->partsupfunc,
key->partcollation, b1->datums, b1->kind,
b1->lower, b2);
}
/*
* get_partition_bound_num_indexes
*
 * Returns the number of entries in the partition bound's indexes array.
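 *
 * For example, a range-partitioned table with ndatums = 3 has 4 entries in
 * its indexes array, while a hash-partitioned table whose greatest modulus
 * is 8 has 8 entries.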
*/
static int
get_partition_bound_num_indexes(PartitionBoundInfo bound)
{
int num_indexes;
Assert(bound);
switch (bound->strategy)
{
case PARTITION_STRATEGY_HASH:
/*
 * The number of entries in the indexes array is the same as the
 * greatest modulus.
*/
num_indexes = get_hash_partition_greatest_modulus(bound);
break;
case PARTITION_STRATEGY_LIST:
num_indexes = bound->ndatums;
break;
case PARTITION_STRATEGY_RANGE:
/* Range partitioned table has an extra index. */
num_indexes = bound->ndatums + 1;
break;
default:
elog(ERROR, "unexpected partition strategy: %d",
(int) bound->strategy);
}
return num_indexes;
}
/*
* get_partition_operator
*
 * Return the OID of the operator of the given strategy for the given partition
* key column. It is assumed that the partitioning key is of the same type as
* the chosen partitioning opclass, or at least binary-compatible. In the
* latter case, *need_relabel is set to true if the opclass is not of a
* polymorphic type (indicating a RelabelType node needed on top), otherwise
* false.
*/
static Oid
get_partition_operator(PartitionKey key, int col, StrategyNumber strategy,
bool *need_relabel)
{
Oid operoid;
/*
* Get the operator in the partitioning opfamily using the opclass'
* declared input type as both left- and righttype.
*/
operoid = get_opfamily_member(key->partopfamily[col],
key->partopcintype[col],
key->partopcintype[col],
strategy);
if (!OidIsValid(operoid))
elog(ERROR, "missing operator %d(%u,%u) in partition opfamily %u",
strategy, key->partopcintype[col], key->partopcintype[col],
key->partopfamily[col]);
/*
* If the partition key column is not of the same type as the operator
* class and not polymorphic, tell caller to wrap the non-Const expression
* in a RelabelType. This matches what parse_coerce.c does.
*/
*need_relabel = (key->parttypid[col] != key->partopcintype[col] &&
key->partopcintype[col] != RECORDOID &&
!IsPolymorphicType(key->partopcintype[col]));
return operoid;
}
/*
* make_partition_op_expr
* Returns an Expr for the given partition key column with arg1 and
* arg2 as its leftop and rightop, respectively
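 *
 * For the list strategy with more than one value (and a key type that is
 * not itself an array type), the values are collapsed into a single
 * "arg1 = ANY (ARRAY[...])" ScalarArrayOpExpr; otherwise individual
 * comparisons are built and, if necessary, OR'ed together.  For the range
 * strategy, a single comparison OpExpr is built.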
*/
static Expr *
make_partition_op_expr(PartitionKey key, int keynum,
uint16 strategy, Expr *arg1, Expr *arg2)
{
Oid operoid;
bool need_relabel = false;
Expr *result = NULL;
/* Get the correct btree operator for this partitioning column */
operoid = get_partition_operator(key, keynum, strategy, &need_relabel);
/*
* Chosen operator may be such that the non-Const operand needs to be
* coerced, so apply the same; see the comment in
* get_partition_operator().
*/
if (!IsA(arg1, Const) &&
(need_relabel ||
key->partcollation[keynum] != key->parttypcoll[keynum]))
arg1 = (Expr *) makeRelabelType(arg1,
key->partopcintype[keynum],
-1,
key->partcollation[keynum],
COERCE_EXPLICIT_CAST);
/* Generate the actual expression */
switch (key->strategy)
{
case PARTITION_STRATEGY_LIST:
{
List *elems = (List *) arg2;
int nelems = list_length(elems);
Assert(nelems >= 1);
Assert(keynum == 0);
if (nelems > 1 &&
!type_is_array(key->parttypid[keynum]))
{
ArrayExpr *arrexpr;
ScalarArrayOpExpr *saopexpr;
/* Construct an ArrayExpr for the right-hand inputs */
arrexpr = makeNode(ArrayExpr);
arrexpr->array_typeid =
get_array_type(key->parttypid[keynum]);
arrexpr->array_collid = key->parttypcoll[keynum];
arrexpr->element_typeid = key->parttypid[keynum];
arrexpr->elements = elems;
arrexpr->multidims = false;
arrexpr->location = -1;
/* Build leftop = ANY (rightop) */
saopexpr = makeNode(ScalarArrayOpExpr);
saopexpr->opno = operoid;
saopexpr->opfuncid = get_opcode(operoid);
saopexpr->useOr = true;
saopexpr->inputcollid = key->partcollation[keynum];
saopexpr->args = list_make2(arg1, arrexpr);
saopexpr->location = -1;
result = (Expr *) saopexpr;
}
else
{
List *elemops = NIL;
ListCell *lc;
foreach(lc, elems)
{
Expr *elem = lfirst(lc),
*elemop;
elemop = make_opclause(operoid,
BOOLOID,
false,
arg1, elem,
InvalidOid,
key->partcollation[keynum]);
elemops = lappend(elemops, elemop);
}
result = nelems > 1 ? makeBoolExpr(OR_EXPR, elemops, -1) : linitial(elemops);
}
break;
}
case PARTITION_STRATEGY_RANGE:
result = make_opclause(operoid,
BOOLOID,
false,
arg1, arg2,
InvalidOid,
key->partcollation[keynum]);
break;
default:
elog(ERROR, "invalid partitioning strategy");
break;
}
return result;
}
/*
* get_qual_for_hash
*
* Returns a CHECK constraint expression to use as a hash partition's
* constraint, given the parent relation and partition bound structure.
*
* The partition constraint for a hash partition is always a call to the
* built-in function satisfies_hash_partition().
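 *
 * For illustration (hypothetical partition): a partition created with
 * FOR VALUES WITH (MODULUS 4, REMAINDER 0) gets a constraint of roughly
 * the form
 *   satisfies_hash_partition('<parent oid>'::oid, 4, 0, <key columns>)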
*/
static List *
get_qual_for_hash(Relation parent, PartitionBoundSpec *spec)
{
PartitionKey key = RelationGetPartitionKey(parent);
FuncExpr *fexpr;
Node *relidConst;
Node *modulusConst;
Node *remainderConst;
List *args;
ListCell *partexprs_item;
int i;
/* Fixed arguments. */
relidConst = (Node *) makeConst(OIDOID,
-1,
InvalidOid,
sizeof(Oid),
ObjectIdGetDatum(RelationGetRelid(parent)),
false,
true);
modulusConst = (Node *) makeConst(INT4OID,
-1,
InvalidOid,
sizeof(int32),
Int32GetDatum(spec->modulus),
false,
true);
remainderConst = (Node *) makeConst(INT4OID,
-1,
InvalidOid,
sizeof(int32),
Int32GetDatum(spec->remainder),
false,
true);
args = list_make3(relidConst, modulusConst, remainderConst);
partexprs_item = list_head(key->partexprs);
/* Add an argument for each key column. */
for (i = 0; i < key->partnatts; i++)
{
Node *keyCol;
/* Left operand */
if (key->partattrs[i] != 0)
{
keyCol = (Node *) makeVar(1,
key->partattrs[i],
key->parttypid[i],
key->parttypmod[i],
key->parttypcoll[i],
0);
}
else
{
keyCol = (Node *) copyObject(lfirst(partexprs_item));
partexprs_item = lnext(key->partexprs, partexprs_item);
}
args = lappend(args, keyCol);
}
fexpr = makeFuncExpr(F_SATISFIES_HASH_PARTITION,
BOOLOID,
args,
InvalidOid,
InvalidOid,
COERCE_EXPLICIT_CALL);
return list_make1(fexpr);
}
/*
* get_qual_for_list
*
* Returns an implicit-AND list of expressions to use as a list partition's
* constraint, given the parent relation and partition bound structure.
*
* The function returns NIL for a default partition when it's the only
* partition since in that case there is no constraint.
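 *
 * For illustration (hypothetical partitions): FOR VALUES IN (1, 2) yields
 * roughly "keyCol IS NOT NULL AND keyCol = ANY (ARRAY[1, 2])", while
 * FOR VALUES IN (1, NULL) yields roughly "keyCol IS NULL OR keyCol = 1".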
*/
static List *
get_qual_for_list(Relation parent, PartitionBoundSpec *spec)
{
PartitionKey key = RelationGetPartitionKey(parent);
List *result;
Expr *keyCol;
Expr *opexpr;
NullTest *nulltest;
ListCell *cell;
List *elems = NIL;
bool list_has_null = false;
/*
* Only single-column list partitioning is supported, so we are worried
* only about the partition key with index 0.
*/
Assert(key->partnatts == 1);
/* Construct Var or expression representing the partition column */
if (key->partattrs[0] != 0)
keyCol = (Expr *) makeVar(1,
key->partattrs[0],
key->parttypid[0],
key->parttypmod[0],
key->parttypcoll[0],
0);
else
keyCol = (Expr *) copyObject(linitial(key->partexprs));
/*
* For default list partition, collect datums for all the partitions. The
* default partition constraint should check that the partition key is
* equal to none of those.
*/
if (spec->is_default)
{
int i;
int ndatums = 0;
PartitionDesc pdesc = RelationGetPartitionDesc(parent);
PartitionBoundInfo boundinfo = pdesc->boundinfo;
if (boundinfo)
{
ndatums = boundinfo->ndatums;
if (partition_bound_accepts_nulls(boundinfo))
list_has_null = true;
}
/*
* If default is the only partition, there need not be any partition
* constraint on it.
*/
if (ndatums == 0 && !list_has_null)
return NIL;
for (i = 0; i < ndatums; i++)
{
Const *val;
/*
* Construct Const from known-not-null datum. We must be careful
* to copy the value, because our result has to be able to outlive
* the relcache entry we're copying from.
*/
val = makeConst(key->parttypid[0],
key->parttypmod[0],
key->parttypcoll[0],
key->parttyplen[0],
datumCopy(*boundinfo->datums[i],
key->parttypbyval[0],
key->parttyplen[0]),
false, /* isnull */
key->parttypbyval[0]);
elems = lappend(elems, val);
}
}
else
{
/*
* Create list of Consts for the allowed values, excluding any nulls.
*/
foreach(cell, spec->listdatums)
{
Const *val = castNode(Const, lfirst(cell));
if (val->constisnull)
list_has_null = true;
else
elems = lappend(elems, copyObject(val));
}
}
if (elems)
{
/*
* Generate the operator expression from the non-null partition
* values.
*/
opexpr = make_partition_op_expr(key, 0, BTEqualStrategyNumber,
keyCol, (Expr *) elems);
}
else
{
/*
* If there are no partition values, we don't need an operator
* expression.
*/
opexpr = NULL;
}
if (!list_has_null)
{
/*
* Gin up a "col IS NOT NULL" test that will be AND'd with the main
* expression. This might seem redundant, but the partition routing
* machinery needs it.
*/
nulltest = makeNode(NullTest);
nulltest->arg = keyCol;
nulltest->nulltesttype = IS_NOT_NULL;
nulltest->argisrow = false;
nulltest->location = -1;
result = opexpr ? list_make2(nulltest, opexpr) : list_make1(nulltest);
}
else
{
/*
* Gin up a "col IS NULL" test that will be OR'd with the main
* expression.
*/
nulltest = makeNode(NullTest);
nulltest->arg = keyCol;
nulltest->nulltesttype = IS_NULL;
nulltest->argisrow = false;
nulltest->location = -1;
if (opexpr)
{
Expr *or;
or = makeBoolExpr(OR_EXPR, list_make2(nulltest, opexpr), -1);
result = list_make1(or);
}
else
result = list_make1(nulltest);
}
/*
* Note that, in general, applying NOT to a constraint expression doesn't
* necessarily invert the set of rows it accepts, because NOT (NULL) is
* NULL. However, the partition constraints we construct here never
* evaluate to NULL, so applying NOT works as intended.
*/
if (spec->is_default)
{
result = list_make1(make_ands_explicit(result));
result = list_make1(makeBoolExpr(NOT_EXPR, result, -1));
}
return result;
}
/*
* get_qual_for_range
*
* Returns an implicit-AND list of expressions to use as a range partition's
* constraint, given the parent relation and partition bound structure.
*
* For a multi-column range partition key, say (a, b, c), with (al, bl, cl)
* as the lower bound tuple and (au, bu, cu) as the upper bound tuple, we
* generate an expression tree of the following form:
*
* (a IS NOT NULL) and (b IS NOT NULL) and (c IS NOT NULL)
* AND
* (a > al OR (a = al AND b > bl) OR (a = al AND b = bl AND c >= cl))
* AND
* (a < au OR (a = au AND b < bu) OR (a = au AND b = bu AND c < cu))
*
* It is often the case that a prefix of lower and upper bound tuples contains
* the same values, for example, (al = au), in which case, we will emit an
* expression tree of the following form:
*
* (a IS NOT NULL) and (b IS NOT NULL) and (c IS NOT NULL)
* AND
* (a = al)
* AND
* (b > bl OR (b = bl AND c >= cl))
* AND
 * (b < bu OR (b = bu AND c < cu))
*
* If a bound datum is either MINVALUE or MAXVALUE, these expressions are
* simplified using the fact that any value is greater than MINVALUE and less
* than MAXVALUE. So, for example, if cu = MAXVALUE, c < cu is automatically
* true, and we need not emit any expression for it, and the last line becomes
*
* (b < bu) OR (b = bu), which is simplified to (b <= bu)
*
* In most common cases with only one partition column, say a, the following
* expression tree will be generated: a IS NOT NULL AND a >= al AND a < au
*
* For default partition, it returns the negation of the constraints of all
* the other partitions.
*
* External callers should pass for_default as false; we set it to true only
* when recursing.
*/
static List *
get_qual_for_range(Relation parent, PartitionBoundSpec *spec,
bool for_default)
{
List *result = NIL;
ListCell *cell1,
*cell2,
*partexprs_item,
*partexprs_item_saved;
int i,
j;
PartitionRangeDatum *ldatum,
*udatum;
PartitionKey key = RelationGetPartitionKey(parent);
Expr *keyCol;
Const *lower_val,
*upper_val;
List *lower_or_arms,
*upper_or_arms;
int num_or_arms,
current_or_arm;
ListCell *lower_or_start_datum,
*upper_or_start_datum;
bool need_next_lower_arm,
need_next_upper_arm;
if (spec->is_default)
{
List *or_expr_args = NIL;
PartitionDesc pdesc = RelationGetPartitionDesc(parent);
Oid *inhoids = pdesc->oids;
int nparts = pdesc->nparts,
i;
for (i = 0; i < nparts; i++)
{
Oid inhrelid = inhoids[i];
HeapTuple tuple;
Datum datum;
bool isnull;
PartitionBoundSpec *bspec;
tuple = SearchSysCache1(RELOID, inhrelid);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", inhrelid);
datum = SysCacheGetAttr(RELOID, tuple,
Anum_pg_class_relpartbound,
&isnull);
if (isnull)
elog(ERROR, "null relpartbound for relation %u", inhrelid);
bspec = (PartitionBoundSpec *)
stringToNode(TextDatumGetCString(datum));
if (!IsA(bspec, PartitionBoundSpec))
elog(ERROR, "expected PartitionBoundSpec");
if (!bspec->is_default)
{
List *part_qual;
part_qual = get_qual_for_range(parent, bspec, true);
/*
* AND the constraints of the partition and add to
* or_expr_args
*/
or_expr_args = lappend(or_expr_args, list_length(part_qual) > 1
? makeBoolExpr(AND_EXPR, part_qual, -1)
: linitial(part_qual));
}
ReleaseSysCache(tuple);
}
if (or_expr_args != NIL)
{
Expr *other_parts_constr;
/*
* Combine the constraints obtained for non-default partitions
 * using OR.  Because for_default was passed as true when recursing,
 * each of the OR's args omits the NOT NULL tests for the partition
 * keys (to avoid repeating them uselessly); add them once here.
*/
other_parts_constr =
makeBoolExpr(AND_EXPR,
lappend(get_range_nulltest(key),
list_length(or_expr_args) > 1
? makeBoolExpr(OR_EXPR, or_expr_args,
-1)
: linitial(or_expr_args)),
-1);
/*
* Finally, the default partition contains everything *NOT*
* contained in the non-default partitions.
*/
result = list_make1(makeBoolExpr(NOT_EXPR,
list_make1(other_parts_constr), -1));
}
return result;
}
lower_or_start_datum = list_head(spec->lowerdatums);
upper_or_start_datum = list_head(spec->upperdatums);
num_or_arms = key->partnatts;
/*
 * If this is the recursive call for the default partition, skip the
 * get_range_nulltest() to avoid accumulating NullTests on the same keys
 * for each partition.
*/
if (!for_default)
result = get_range_nulltest(key);
/*
* Iterate over the key columns and check if the corresponding lower and
* upper datums are equal using the btree equality operator for the
* column's type. If equal, we emit single keyCol = common_value
* expression. Starting from the first column for which the corresponding
* lower and upper bound datums are not equal, we generate OR expressions
* as shown in the function's header comment.
*/
i = 0;
partexprs_item = list_head(key->partexprs);
partexprs_item_saved = partexprs_item; /* placate compiler */
forboth(cell1, spec->lowerdatums, cell2, spec->upperdatums)
{
EState *estate;
MemoryContext oldcxt;
Expr *test_expr;
ExprState *test_exprstate;
Datum test_result;
bool isNull;
ldatum = castNode(PartitionRangeDatum, lfirst(cell1));
udatum = castNode(PartitionRangeDatum, lfirst(cell2));
/*
* Since get_range_key_properties() modifies partexprs_item, and we
* might need to start over from the previous expression in the later
* part of this function, save away the current value.
*/
partexprs_item_saved = partexprs_item;
get_range_key_properties(key, i, ldatum, udatum,
&partexprs_item,
&keyCol,
&lower_val, &upper_val);
/*
* If either value is NULL, the corresponding partition bound is
* either MINVALUE or MAXVALUE, and we treat them as unequal, because
* even if they're the same, there is no common value to equate the
* key column with.
*/
if (!lower_val || !upper_val)
break;
/* Create the test expression */
estate = CreateExecutorState();
oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
test_expr = make_partition_op_expr(key, i, BTEqualStrategyNumber,
(Expr *) lower_val,
(Expr *) upper_val);
fix_opfuncids((Node *) test_expr);
test_exprstate = ExecInitExpr(test_expr, NULL);
test_result = ExecEvalExprSwitchContext(test_exprstate,
GetPerTupleExprContext(estate),
&isNull);
MemoryContextSwitchTo(oldcxt);
FreeExecutorState(estate);
/* If not equal, go generate the OR expressions */
if (!DatumGetBool(test_result))
break;
/*
* The bounds for the last key column can't be equal, because such a
* range partition would never be allowed to be defined (it would have
* an empty range otherwise).
*/
if (i == key->partnatts - 1)
elog(ERROR, "invalid range bound specification");
/* Equal, so generate keyCol = lower_val expression */
result = lappend(result,
make_partition_op_expr(key, i, BTEqualStrategyNumber,
keyCol, (Expr *) lower_val));
i++;
}
/* First pair of lower_val and upper_val that are not equal. */
lower_or_start_datum = cell1;
upper_or_start_datum = cell2;
/* OR will have as many arms as there are key columns left. */
num_or_arms = key->partnatts - i;
current_or_arm = 0;
lower_or_arms = upper_or_arms = NIL;
need_next_lower_arm = need_next_upper_arm = true;
while (current_or_arm < num_or_arms)
{
List *lower_or_arm_args = NIL,
*upper_or_arm_args = NIL;
/* Restart scan of columns from the i'th one */
j = i;
partexprs_item = partexprs_item_saved;
for_both_cell(cell1, spec->lowerdatums, lower_or_start_datum,
cell2, spec->upperdatums, upper_or_start_datum)
{
PartitionRangeDatum *ldatum_next = NULL,
*udatum_next = NULL;
ldatum = castNode(PartitionRangeDatum, lfirst(cell1));
if (lnext(spec->lowerdatums, cell1))
ldatum_next = castNode(PartitionRangeDatum,
lfirst(lnext(spec->lowerdatums, cell1)));
udatum = castNode(PartitionRangeDatum, lfirst(cell2));
if (lnext(spec->upperdatums, cell2))
udatum_next = castNode(PartitionRangeDatum,
lfirst(lnext(spec->upperdatums, cell2)));
get_range_key_properties(key, j, ldatum, udatum,
&partexprs_item,
&keyCol,
&lower_val, &upper_val);
if (need_next_lower_arm && lower_val)
{
uint16 strategy;
/*
* For the non-last columns of this arm, use the EQ operator.
* For the last column of this arm, use GT, unless this is the
* last column of the whole bound check, or the next bound
* datum is MINVALUE, in which case use GE.
*/
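/*
 * For instance, with a two-column key (a, b) and lower bound (1, 10),
 * the first arm's last column uses GT (a > 1), while in the second arm
 * b is the last column of the whole bound check and so uses GE
 * (a = 1 AND b >= 10).  With a lower bound of (1, MINVALUE), the first
 * arm uses GE (a >= 1) because the next bound datum is MINVALUE.
 */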
if (j - i < current_or_arm)
strategy = BTEqualStrategyNumber;
else if (j == key->partnatts - 1 ||
(ldatum_next &&
ldatum_next->kind == PARTITION_RANGE_DATUM_MINVALUE))
strategy = BTGreaterEqualStrategyNumber;
else
strategy = BTGreaterStrategyNumber;
lower_or_arm_args = lappend(lower_or_arm_args,
make_partition_op_expr(key, j,
strategy,
keyCol,
(Expr *) lower_val));
}
if (need_next_upper_arm && upper_val)
{
uint16 strategy;
/*
* For the non-last columns of this arm, use the EQ operator.
* For the last column of this arm, use LT, unless the next
* bound datum is MAXVALUE, in which case use LE.
*/
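/*
 * For instance, with a two-column key (a, b) and upper bound (2, 20),
 * the first arm's last column uses LT (a < 2) and the second arm
 * becomes a = 2 AND b < 20.  With an upper bound of (2, MAXVALUE), the
 * first arm uses LE (a <= 2) because the next bound datum is MAXVALUE.
 */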
if (j - i < current_or_arm)
strategy = BTEqualStrategyNumber;
else if (udatum_next &&
udatum_next->kind == PARTITION_RANGE_DATUM_MAXVALUE)
strategy = BTLessEqualStrategyNumber;
else
strategy = BTLessStrategyNumber;
upper_or_arm_args = lappend(upper_or_arm_args,
make_partition_op_expr(key, j,
strategy,
keyCol,
(Expr *) upper_val));
}
/*
* Have we generated enough of the OR's arguments?  The first arm
* considers the first of the remaining columns, the second arm
* considers the first two of the remaining columns, and so on.
*/
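/*
 * For instance, with two remaining key columns at most two arms are
 * generated: the first covering only the first column, the second
 * covering both.
 */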
++j;
if (j - i > current_or_arm)
{
/*
* We must not emit any more arms if the new column that will
* be considered is unbounded, or this one was.
*/
if (!lower_val || !ldatum_next ||
ldatum_next->kind != PARTITION_RANGE_DATUM_VALUE)
need_next_lower_arm = false;
if (!upper_val || !udatum_next ||
udatum_next->kind != PARTITION_RANGE_DATUM_VALUE)
need_next_upper_arm = false;
break;
}
}
if (lower_or_arm_args != NIL)
lower_or_arms = lappend(lower_or_arms,
list_length(lower_or_arm_args) > 1
? makeBoolExpr(AND_EXPR, lower_or_arm_args, -1)
: linitial(lower_or_arm_args));
if (upper_or_arm_args != NIL)
upper_or_arms = lappend(upper_or_arms,
list_length(upper_or_arm_args) > 1
? makeBoolExpr(AND_EXPR, upper_or_arm_args, -1)
: linitial(upper_or_arm_args));
/* If no work to do in the next iteration, break away. */
if (!need_next_lower_arm && !need_next_upper_arm)
break;
++current_or_arm;
}
/*
* Generate the OR expressions for the lower and upper bounds (if
* required), and append them to the implicitly ANDed list of
* expressions.
*/
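/*
 * For example, for a partition defined FOR VALUES FROM (1, 10) TO (2, 20)
 * over a key (a, b), the lower and upper OR expressions combine with the
 * NullTests added earlier into a constraint of roughly this shape:
 *     a IS NOT NULL AND b IS NOT NULL
 *     AND (a > 1 OR (a = 1 AND b >= 10))
 *     AND (a < 2 OR (a = 2 AND b < 20))
 */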
if (lower_or_arms != NIL)
result = lappend(result,
list_length(lower_or_arms) > 1
? makeBoolExpr(OR_EXPR, lower_or_arms, -1)
: linitial(lower_or_arms));
if (upper_or_arms != NIL)
result = lappend(result,
list_length(upper_or_arms) > 1
? makeBoolExpr(OR_EXPR, upper_or_arms, -1)
: linitial(upper_or_arms));
/*
* As noted above, for a non-default partition we return a list containing
* constant TRUE.  If the result is NIL during the recursive call for the
* default partition, it implies this is the only other partition which can
* hold every value of the key except NULL.  Hence we return the NullTest
* result skipped earlier.
*/
if (result == NIL)
result = for_default
? get_range_nulltest(key)
: list_make1(makeBoolConst(true, false));
return result;
}
/*
* get_range_key_properties
* Returns range partition key information for a given column
*
* This is a subroutine for get_qual_for_range, and its API is pretty
* specialized to that caller.
*
* Constructs an Expr for the key column (returned in *keyCol) and Consts
* for the lower and upper range limits (returned in *lower_val and
* *upper_val). For MINVALUE/MAXVALUE limits, NULL is returned instead of
* a Const. All of these structures are freshly palloc'd.
*
* *partexprs_item points to the cell containing the next expression in
* the key->partexprs list, or NULL. It may be advanced upon return.
*/
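/*
 * For instance, for a table partitioned BY RANGE (a) with a partition
 * defined FOR VALUES FROM (10) TO (20), the call for keynum 0 sets
 * *keyCol to a Var referencing column a, *lower_val to a Const of 10
 * and *upper_val to a Const of 20.  Had the lower bound been
 * (MINVALUE), *lower_val would be set to NULL instead.
 */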
static void
get_range_key_properties(PartitionKey key, int keynum,
PartitionRangeDatum *ldatum,
PartitionRangeDatum *udatum,
ListCell **partexprs_item,
Expr **keyCol,
Const **lower_val, Const **upper_val)
{
/* Get partition key expression for this column */
if (key->partattrs[keynum] != 0)
{
*keyCol = (Expr *) makeVar(1,
key->partattrs[keynum],
key->parttypid[keynum],
key->parttypmod[keynum],
key->parttypcoll[keynum],
0);
}
else
{
if (*partexprs_item == NULL)
elog(ERROR, "wrong number of partition key expressions");
*keyCol = copyObject(lfirst(*partexprs_item));
*partexprs_item = lnext(key->partexprs, *partexprs_item);
}
/* Get appropriate Const nodes for the bounds */
if (ldatum->kind == PARTITION_RANGE_DATUM_VALUE)
*lower_val = castNode(Const, copyObject(ldatum->value));
else
*lower_val = NULL;
if (udatum->kind == PARTITION_RANGE_DATUM_VALUE)
*upper_val = castNode(Const, copyObject(udatum->value));
else
*upper_val = NULL;
}
/*
* get_range_nulltest
*
* A non-default range partition table does not currently allow partition
* keys to be null, so emit an IS NOT NULL expression for each key column.
*/
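/*
 * For example, for a table partitioned BY RANGE (a, b) this returns a
 * list of two NullTest nodes, equivalent to requiring
 *     a IS NOT NULL AND b IS NOT NULL
 */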
static List *
get_range_nulltest(PartitionKey key)
{
List *result = NIL;
NullTest *nulltest;
ListCell *partexprs_item;
int i;
partexprs_item = list_head(key->partexprs);
for (i = 0; i < key->partnatts; i++)
{
Expr *keyCol;
if (key->partattrs[i] != 0)
{
keyCol = (Expr *) makeVar(1,
key->partattrs[i],
key->parttypid[i],
key->parttypmod[i],
key->parttypcoll[i],
0);
}
else
{
if (partexprs_item == NULL)
elog(ERROR, "wrong number of partition key expressions");
keyCol = copyObject(lfirst(partexprs_item));
partexprs_item = lnext(key->partexprs, partexprs_item);
}
nulltest = makeNode(NullTest);
nulltest->arg = keyCol;
nulltest->nulltesttype = IS_NOT_NULL;
nulltest->argisrow = false;
nulltest->location = -1;
result = lappend(result, nulltest);
}
return result;
}
/*
* compute_partition_hash_value
*
* Compute the hash value for given partition key values.
*/
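/*
 * For instance, for a two-column hash key (a, b) with both values
 * non-null, the result is effectively
 *     hash_combine64(hash_combine64(0, hash_a(a, seed)), hash_b(b, seed))
 * where hash_a/hash_b are the columns' extended hash support functions
 * and seed is HASH_PARTITION_SEED; null values are simply skipped.
 */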
uint64
compute_partition_hash_value(int partnatts, FmgrInfo *partsupfunc, Oid *partcollation,
Datum *values, bool *isnull)
{
int i;
uint64 rowHash = 0;
Datum seed = UInt64GetDatum(HASH_PARTITION_SEED);
for (i = 0; i < partnatts; i++)
{
/* Nulls are just ignored */
if (!isnull[i])
{
Datum hash;
Assert(OidIsValid(partsupfunc[i].fn_oid));
/*
* Compute hash for each datum value by calling respective
* datatype-specific hash functions of each partition key
* attribute.
*/
hash = FunctionCall2Coll(&partsupfunc[i], partcollation[i],
values[i], seed);
/* Form a single 64-bit hash value */
rowHash = hash_combine64(rowHash, DatumGetUInt64(hash));
}
}
return rowHash;
}
/*
* satisfies_hash_partition
*
* This is an SQL-callable function for use in hash partition constraints.
* The first three arguments are the parent table OID, modulus, and remainder.
* The remaining arguments are the values of the partitioning columns (or
* expressions); these are hashed and the results are combined into a single
* hash value by calling hash_combine64.
*
* Returns true if the remainder produced when this computed single hash
* value is divided by the given modulus is equal to the given remainder,
* otherwise false.
*
* See get_qual_for_hash() for usage.
*/
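/*
 * For example, a hash partition with modulus 4 and remainder 0 typically
 * carries a check constraint of roughly this form (the OID shown is
 * merely illustrative):
 *     satisfies_hash_partition('16384'::oid, 4, 0, a, b)
 * where a and b are the partitioning columns of the parent table.
 */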
Datum
satisfies_hash_partition(PG_FUNCTION_ARGS)
{
typedef struct ColumnsHashData
{
Oid relid;
int nkeys;
Oid variadic_type;
int16 variadic_typlen;
bool variadic_typbyval;
char variadic_typalign;
Oid partcollid[PARTITION_MAX_KEYS];
FmgrInfo partsupfunc[FLEXIBLE_ARRAY_MEMBER];
} ColumnsHashData;
Oid parentId;
int modulus;
int remainder;
Datum seed = UInt64GetDatum(HASH_PARTITION_SEED);
ColumnsHashData *my_extra;
uint64 rowHash = 0;
/* Return null if the parent OID, modulus, or remainder is NULL. */
if (PG_ARGISNULL(0) || PG_ARGISNULL(1) || PG_ARGISNULL(2))
PG_RETURN_NULL();
parentId = PG_GETARG_OID(0);
modulus = PG_GETARG_INT32(1);
remainder = PG_GETARG_INT32(2);
/* Sanity check modulus and remainder. */
if (modulus <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("modulus for hash partition must be a positive integer")));
if (remainder < 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("remainder for hash partition must be a non-negative integer")));
if (remainder >= modulus)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("remainder for hash partition must be less than modulus")));
/*
* Cache hash function information.
*/
my_extra = (ColumnsHashData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL || my_extra->relid != parentId)
{
Relation parent;
PartitionKey key;
int j;
/* Open parent relation and fetch partition key info */
parent = try_relation_open(parentId, AccessShareLock);
if (parent == NULL)
PG_RETURN_NULL();
key = RelationGetPartitionKey(parent);
/* Reject parent table that is not hash-partitioned. */
if (parent->rd_rel->relkind != RELKIND_PARTITIONED_TABLE ||
key->strategy != PARTITION_STRATEGY_HASH)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"%s\" is not a hash partitioned table",
get_rel_name(parentId))));
if (!get_fn_expr_variadic(fcinfo->flinfo))
{
int nargs = PG_NARGS() - 3;
/* complain if wrong number of column values */
if (key->partnatts != nargs)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("number of partitioning columns (%d) does not match number of partition keys provided (%d)",
key->partnatts, nargs)));
/* allocate space for our cache */
fcinfo->flinfo->fn_extra =
MemoryContextAllocZero(fcinfo->flinfo->fn_mcxt,
offsetof(ColumnsHashData, partsupfunc) +
sizeof(FmgrInfo) * nargs);
my_extra = (ColumnsHashData *) fcinfo->flinfo->fn_extra;
my_extra->relid = parentId;
my_extra->nkeys = key->partnatts;
memcpy(my_extra->partcollid, key->partcollation,
key->partnatts * sizeof(Oid));
/* check argument types and save fmgr_infos */
for (j = 0; j < key->partnatts; ++j)
{
Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, j + 3);
if (argtype != key->parttypid[j] && !IsBinaryCoercible(argtype, key->parttypid[j]))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("column %d of the partition key has type \"%s\", but supplied value is of type \"%s\"",
j + 1, format_type_be(key->parttypid[j]), format_type_be(argtype))));
fmgr_info_copy(&my_extra->partsupfunc[j],
&key->partsupfunc[j],
fcinfo->flinfo->fn_mcxt);
}
}
else
{
ArrayType *variadic_array = PG_GETARG_ARRAYTYPE_P(3);
/* allocate space for our cache -- just one FmgrInfo in this case */
fcinfo->flinfo->fn_extra =
MemoryContextAllocZero(fcinfo->flinfo->fn_mcxt,
offsetof(ColumnsHashData, partsupfunc) +
sizeof(FmgrInfo));
my_extra = (ColumnsHashData *) fcinfo->flinfo->fn_extra;
my_extra->relid = parentId;
my_extra->nkeys = key->partnatts;
my_extra->variadic_type = ARR_ELEMTYPE(variadic_array);
get_typlenbyvalalign(my_extra->variadic_type,
&my_extra->variadic_typlen,
&my_extra->variadic_typbyval,
&my_extra->variadic_typalign);
my_extra->partcollid[0] = key->partcollation[0];
/* check argument types */
for (j = 0; j < key->partnatts; ++j)
if (key->parttypid[j] != my_extra->variadic_type)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("column %d of the partition key has type \"%s\", but supplied value is of type \"%s\"",
j + 1,
format_type_be(key->parttypid[j]),
format_type_be(my_extra->variadic_type))));
fmgr_info_copy(&my_extra->partsupfunc[0],
&key->partsupfunc[0],
fcinfo->flinfo->fn_mcxt);
}
/* Hold lock until commit */
relation_close(parent, NoLock);
}
if (!OidIsValid(my_extra->variadic_type))
{
int nkeys = my_extra->nkeys;
int i;
/*
* For a non-variadic call, neither the number of arguments nor their
* types can change across calls, so avoid the expense of rechecking
* here.
*/
for (i = 0; i < nkeys; i++)
{
Datum hash;
/* keys start from fourth argument of function. */
int argno = i + 3;
if (PG_ARGISNULL(argno))
continue;
hash = FunctionCall2Coll(&my_extra->partsupfunc[i],
my_extra->partcollid[i],
PG_GETARG_DATUM(argno),
seed);
/* Form a single 64-bit hash value */
rowHash = hash_combine64(rowHash, DatumGetUInt64(hash));
}
}
else
{
ArrayType *variadic_array = PG_GETARG_ARRAYTYPE_P(3);
int i;
int nelems;
Datum *datum;
bool *isnull;
deconstruct_array(variadic_array,
my_extra->variadic_type,
my_extra->variadic_typlen,
my_extra->variadic_typbyval,
my_extra->variadic_typalign,
&datum, &isnull, &nelems);
/* complain if wrong number of column values */
if (nelems != my_extra->nkeys)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("number of partitioning columns (%d) does not match number of partition keys provided (%d)",
my_extra->nkeys, nelems)));
for (i = 0; i < nelems; i++)
{
Datum hash;
if (isnull[i])
continue;
hash = FunctionCall2Coll(&my_extra->partsupfunc[0],
my_extra->partcollid[0],
datum[i],
seed);
/* Form a single 64-bit hash value */
rowHash = hash_combine64(rowHash, DatumGetUInt64(hash));
}
}
PG_RETURN_BOOL(rowHash % modulus == remainder);
}