Improve hash_create()'s API for some added robustness.

Invent a new flag bit HASH_STRINGS to specify C-string hashing, which
was formerly the default; and add assertions insisting that exactly
one of the bits HASH_STRINGS, HASH_BLOBS, and HASH_FUNCTION be set.
This is in hopes of preventing recurrences of the type of oversight
fixed in commit a1b8aa1e4 (i.e., mistakenly omitting HASH_BLOBS).

Also, when HASH_STRINGS is specified, insist that the keysize be
more than 8 bytes.  This is a heuristic, but it should catch
accidental use of HASH_STRINGS for integer or pointer keys.
(Nearly all existing use-cases set the keysize to NAMEDATALEN or
more, so there's little reason to think this restriction should
be problematic.)
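
For illustration, a string-keyed caller under the new rules might look like
the sketch below (the entry struct and table name are hypothetical, but the
flags and the keysize requirement are the ones described above):

    typedef struct MyStringEntry
    {
        char    name[NAMEDATALEN];   /* hash key: NUL-terminated C string */
        int     count;
    } MyStringEntry;

    HASHCTL ctl;
    HTAB   *myhash;

    ctl.keysize = NAMEDATALEN;       /* HASH_STRINGS keys must exceed 8 bytes */
    ctl.entrysize = sizeof(MyStringEntry);
    myhash = hash_create("my string hash", 32, &ctl,
                         HASH_ELEM | HASH_STRINGS);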

Tweak hash_create() to insist that the HASH_ELEM flag be set, and
remove the defaults it had for keysize and entrysize.  Since those
defaults were undocumented and basically useless, no callers
omitted HASH_ELEM anyway.

Also, remove the memset/MemSet calls that zero the HASHCTL parameter
struct from those callers that had one.  This has never really been necessary,
and while it wasn't a bad coding convention it was confusing that
some callers did it and some did not.  We might as well save a few
cycles by standardizing on "not".
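
To illustrate the resulting idiom, a blob-keyed caller now reads roughly as
in this sketch (hypothetical names; note that there is no zeroing of the
HASHCTL, and HASH_ELEM is always present):

    typedef struct MyOidEntry
    {
        Oid     relid;               /* hash key */
        int     refcount;
    } MyOidEntry;

    HASHCTL ctl;
    HTAB   *myoidhash;

    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(MyOidEntry);
    ctl.hcxt = CurrentMemoryContext;
    myoidhash = hash_create("my oid hash", 128, &ctl,
                            HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);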

Also improve the documentation for hash_create().

In passing, improve reinit.c's usage of a hash table by storing
the key as a binary Oid rather than a string; and, since that's
a temporary hash table, allocate it in CurrentMemoryContext for
neatness.

Discussion: https://postgr.es/m/590625.1607878171@sss.pgh.pa.us
Tom Lane 2020-12-15 11:38:53 -05:00
parent a58db3aa10
commit b3817f5f77
63 changed files with 112 additions and 158 deletions


@ -2607,7 +2607,8 @@ createConnHash(void)
ctl.keysize = NAMEDATALEN;
ctl.entrysize = sizeof(remoteConnHashEnt);
return hash_create("Remote Con hash", NUMCONN, &ctl, HASH_ELEM);
return hash_create("Remote Con hash", NUMCONN, &ctl,
HASH_ELEM | HASH_STRINGS);
}
static void


@ -567,7 +567,6 @@ pgss_shmem_startup(void)
pgss->stats.dealloc = 0;
}
memset(&info, 0, sizeof(info));
info.keysize = sizeof(pgssHashKey);
info.entrysize = sizeof(pgssEntry);
pgss_hash = ShmemInitHash("pg_stat_statements hash",


@ -119,14 +119,11 @@ GetConnection(UserMapping *user, bool will_prep_stmt)
{
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(ConnCacheKey);
ctl.entrysize = sizeof(ConnCacheEntry);
/* allocate ConnectionHash in the cache context */
ctl.hcxt = CacheMemoryContext;
ConnectionHash = hash_create("postgres_fdw connections", 8,
&ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
HASH_ELEM | HASH_BLOBS);
/*
* Register some callback functions that manage connection cleanup.


@ -93,7 +93,6 @@ InitializeShippableCache(void)
HASHCTL ctl;
/* Create the hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(ShippableCacheKey);
ctl.entrysize = sizeof(ShippableCacheEntry);
ShippableCacheHash =


@ -714,7 +714,6 @@ load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
MemoryContext SPIcontext;
/* initialize the category hash table */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = MAX_CATNAME_LEN;
ctl.entrysize = sizeof(crosstab_HashEnt);
ctl.hcxt = per_query_ctx;
@ -726,7 +725,7 @@ load_categories_hash(char *cats_sql, MemoryContext per_query_ctx)
crosstab_hash = hash_create("crosstab hash",
INIT_CATS,
&ctl,
HASH_ELEM | HASH_CONTEXT);
HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
/* Connect to SPI manager */
if ((ret = SPI_connect()) < 0)


@ -76,7 +76,6 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
* nodeBuffersTab hash is association between index blocks and it's
* buffers.
*/
memset(&hashCtl, 0, sizeof(hashCtl));
hashCtl.keysize = sizeof(BlockNumber);
hashCtl.entrysize = sizeof(GISTNodeBuffer);
hashCtl.hcxt = CurrentMemoryContext;


@ -1363,7 +1363,6 @@ _hash_finish_split(Relation rel, Buffer metabuf, Buffer obuf, Bucket obucket,
bool found;
/* Initialize hash tables used to track TIDs */
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(ItemPointerData);
hash_ctl.entrysize = sizeof(ItemPointerData);
hash_ctl.hcxt = CurrentMemoryContext;


@ -266,7 +266,6 @@ begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xm
state->rs_cxt = rw_cxt;
/* Initialize hash tables used to track update chains */
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(TidHashKey);
hash_ctl.entrysize = sizeof(UnresolvedTupData);
hash_ctl.hcxt = state->rs_cxt;
@ -824,7 +823,6 @@ logical_begin_heap_rewrite(RewriteState state)
state->rs_begin_lsn = GetXLogInsertRecPtr();
state->rs_num_rewrite_mappings = 0;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(TransactionId);
hash_ctl.entrysize = sizeof(RewriteMappingFile);
hash_ctl.hcxt = state->rs_cxt;


@ -113,7 +113,6 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/* create hash table when first needed */
HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(xl_invalid_page_key);
ctl.entrysize = sizeof(xl_invalid_page);


@ -188,7 +188,6 @@ init_enum_blacklist(void)
{
HASHCTL hash_ctl;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(Oid);
hash_ctl.hcxt = TopTransactionContext;


@ -171,7 +171,6 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
*rel_numparents;
ListCell *l;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(SeenRelsEntry);
ctl.hcxt = CurrentMemoryContext;


@ -2375,7 +2375,6 @@ AddEventToPendingNotifies(Notification *n)
ListCell *l;
/* Create the hash table */
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Notification *);
hash_ctl.entrysize = sizeof(NotificationHash);
hash_ctl.hash = notification_hash;


@ -406,15 +406,13 @@ InitQueryHashTable(void)
{
HASHCTL hash_ctl;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = NAMEDATALEN;
hash_ctl.entrysize = sizeof(PreparedStatement);
prepared_queries = hash_create("Prepared Queries",
32,
&hash_ctl,
HASH_ELEM);
HASH_ELEM | HASH_STRINGS);
}
/*


@ -1087,7 +1087,6 @@ create_seq_hashtable(void)
{
HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(SeqTableData);


@ -521,7 +521,6 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
HTAB *htab;
int i;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(SubplanResultRelHashElem);
ctl.hcxt = CurrentMemoryContext;


@ -47,11 +47,11 @@ RegisterExtensibleNodeEntry(HTAB **p_htable, const char *htable_label,
{
HASHCTL ctl;
memset(&ctl, 0, sizeof(HASHCTL));
ctl.keysize = EXTNODENAME_MAX_LEN;
ctl.entrysize = sizeof(ExtensibleNodeEntry);
*p_htable = hash_create(htable_label, 100, &ctl, HASH_ELEM);
*p_htable = hash_create(htable_label, 100, &ctl,
HASH_ELEM | HASH_STRINGS);
}
if (strlen(extnodename) >= EXTNODENAME_MAX_LEN)


@ -1982,7 +1982,6 @@ lookup_proof_cache(Oid pred_op, Oid clause_op, bool refute_it)
/* First time through: initialize the hash table */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(OprProofCacheKey);
ctl.entrysize = sizeof(OprProofCacheEntry);
OprProofCacheHash = hash_create("Btree proof lookup cache", 256,


@ -400,7 +400,6 @@ build_join_rel_hash(PlannerInfo *root)
ListCell *l;
/* Create the hash table */
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Relids);
hash_ctl.entrysize = sizeof(JoinHashEntry);
hash_ctl.hash = bitmap_hash;


@ -999,7 +999,6 @@ find_oper_cache_entry(OprCacheKey *key)
/* First time through: initialize the hash table */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(OprCacheKey);
ctl.entrysize = sizeof(OprCacheEntry);
OprCacheHash = hash_create("Operator lookup cache", 256,


@ -286,13 +286,13 @@ CreatePartitionDirectory(MemoryContext mcxt)
PartitionDirectory pdir;
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(HASHCTL));
pdir = palloc(sizeof(PartitionDirectoryData));
pdir->pdir_mcxt = mcxt;
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(PartitionDirectoryEntry);
ctl.hcxt = mcxt;
pdir = palloc(sizeof(PartitionDirectoryData));
pdir->pdir_mcxt = mcxt;
pdir->pdir_hash = hash_create("partition directory", 256, &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);


@ -2043,7 +2043,6 @@ do_autovacuum(void)
pg_class_desc = CreateTupleDescCopy(RelationGetDescr(classRel));
/* create hash table for toast <-> main relid mapping */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(av_relation);


@ -1161,7 +1161,6 @@ CompactCheckpointerRequestQueue(void)
skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
/* Initialize temporary hash table */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(CheckpointerRequest);
ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
ctl.hcxt = CurrentMemoryContext;


@ -1265,7 +1265,6 @@ pgstat_collect_oids(Oid catalogid, AttrNumber anum_oid)
HeapTuple tup;
Snapshot snapshot;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(Oid);
hash_ctl.hcxt = CurrentMemoryContext;
@ -1815,7 +1814,6 @@ pgstat_init_function_usage(FunctionCallInfo fcinfo,
/* First time through - initialize function stat table */
HASHCTL hash_ctl;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(PgStat_BackendFunctionEntry);
pgStatFunctions = hash_create("Function stat entries",
@ -1975,7 +1973,6 @@ get_tabstat_entry(Oid rel_id, bool isshared)
{
HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(TabStatHashEntry);
@ -4994,7 +4991,6 @@ reset_dbentry_counters(PgStat_StatDBEntry *dbentry)
dbentry->stat_reset_timestamp = GetCurrentTimestamp();
dbentry->stats_timestamp = 0;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
dbentry->tables = hash_create("Per-database table",
@ -5423,7 +5419,6 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
/*
* Create the DB hashtable
*/
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(PgStat_StatDBEntry);
hash_ctl.hcxt = pgStatLocalContext;
@ -5608,7 +5603,6 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep)
break;
}
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(PgStat_StatTabEntry);
hash_ctl.hcxt = pgStatLocalContext;


@ -111,7 +111,6 @@ logicalrep_relmap_init(void)
ALLOCSET_DEFAULT_SIZES);
/* Initialize the relation hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(LogicalRepRelId);
ctl.entrysize = sizeof(LogicalRepRelMapEntry);
ctl.hcxt = LogicalRepRelMapContext;
@ -120,7 +119,6 @@ logicalrep_relmap_init(void)
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* Initialize the type hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(LogicalRepTyp);
ctl.hcxt = LogicalRepRelMapContext;
@ -606,7 +604,6 @@ logicalrep_partmap_init(void)
ALLOCSET_DEFAULT_SIZES);
/* Initialize the relation hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid); /* partition OID */
ctl.entrysize = sizeof(LogicalRepPartMapEntry);
ctl.hcxt = LogicalRepPartMapContext;


@ -1619,8 +1619,6 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
if (!rbtxn_has_catalog_changes(txn) || dlist_is_empty(&txn->tuplecids))
return;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(ReorderBufferTupleCidKey);
hash_ctl.entrysize = sizeof(ReorderBufferTupleCidEnt);
hash_ctl.hcxt = rb->context;
@ -4116,7 +4114,6 @@ ReorderBufferToastInitHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
Assert(txn->toast_hash == NULL);
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(ReorderBufferToastEnt);
hash_ctl.hcxt = rb->context;


@ -372,7 +372,6 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
{
HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(struct tablesync_start_time_mapping);
last_start_times = hash_create("Logical replication table sync worker start times",


@ -867,22 +867,18 @@ static void
init_rel_sync_cache(MemoryContext cachectx)
{
HASHCTL ctl;
MemoryContext old_ctxt;
if (RelationSyncCache != NULL)
return;
/* Make a new hash table for the cache */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(RelationSyncEntry);
ctl.hcxt = cachectx;
old_ctxt = MemoryContextSwitchTo(cachectx);
RelationSyncCache = hash_create("logical replication output relation cache",
128, &ctl,
HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
(void) MemoryContextSwitchTo(old_ctxt);
Assert(RelationSyncCache != NULL);


@ -2505,7 +2505,6 @@ InitBufferPoolAccess(void)
memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(int32);
hash_ctl.entrysize = sizeof(PrivateRefCountEntry);


@ -465,7 +465,6 @@ InitLocalBuffers(void)
}
/* Create the lookup hash table */
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(BufferTag);
info.entrysize = sizeof(LocalBufferLookupEnt);


@ -30,7 +30,7 @@ static void ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname,
typedef struct
{
char oid[OIDCHARS + 1];
Oid reloid; /* hash key */
} unlogged_relation_entry;
/*
@ -172,10 +172,11 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
* need to be reset. Otherwise, this cleanup operation would be
* O(n^2).
*/
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(unlogged_relation_entry);
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(unlogged_relation_entry);
hash = hash_create("unlogged hash", 32, &ctl, HASH_ELEM);
ctl.hcxt = CurrentMemoryContext;
hash = hash_create("unlogged relation OIDs", 32, &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* Scan the directory. */
dbspace_dir = AllocateDir(dbspacedirname);
@ -198,9 +199,8 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
* Put the OID portion of the name into the hash table, if it
* isn't already.
*/
memset(ent.oid, 0, sizeof(ent.oid));
memcpy(ent.oid, de->d_name, oidchars);
hash_search(hash, &ent, HASH_ENTER, NULL);
ent.reloid = atooid(de->d_name);
(void) hash_search(hash, &ent, HASH_ENTER, NULL);
}
/* Done with the first pass. */
@ -224,7 +224,6 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
{
ForkNumber forkNum;
int oidchars;
bool found;
unlogged_relation_entry ent;
/* Skip anything that doesn't look like a relation data file. */
@ -238,14 +237,10 @@ ResetUnloggedRelationsInDbspaceDir(const char *dbspacedirname, int op)
/*
* See whether the OID portion of the name shows up in the hash
* table.
* table. If so, nuke it!
*/
memset(ent.oid, 0, sizeof(ent.oid));
memcpy(ent.oid, de->d_name, oidchars);
hash_search(hash, &ent, HASH_FIND, &found);
/* If so, nuke it! */
if (found)
ent.reloid = atooid(de->d_name);
if (hash_search(hash, &ent, HASH_FIND, NULL))
{
snprintf(rm_path, sizeof(rm_path), "%s/%s",
dbspacedirname, de->d_name);


@ -292,7 +292,6 @@ void
InitShmemIndex(void)
{
HASHCTL info;
int hash_flags;
/*
* Create the shared memory shmem index.
@ -304,11 +303,11 @@ InitShmemIndex(void)
*/
info.keysize = SHMEM_INDEX_KEYSIZE;
info.entrysize = sizeof(ShmemIndexEnt);
hash_flags = HASH_ELEM;
ShmemIndex = ShmemInitHash("ShmemIndex",
SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE,
&info, hash_flags);
&info,
HASH_ELEM | HASH_STRINGS);
}
/*
@ -329,6 +328,11 @@ InitShmemIndex(void)
* whose maximum size is certain, this should be equal to max_size; that
* ensures that no run-time out-of-shared-memory failures can occur.
*
* *infoP and hash_flags must specify at least the entry sizes and key
* comparison semantics (see hash_create()). Flag bits and values specific
* to shared-memory hash tables are added here, except that callers may
* choose to specify HASH_PARTITION and/or HASH_FIXED_SIZE.
*
* Note: before Postgres 9.0, this function returned NULL for some failure
* cases. Now, it always throws error instead, so callers need not check
* for NULL.


@ -81,7 +81,6 @@ InitRecoveryTransactionEnvironment(void)
* Initialize the hash table for tracking the list of locks held by each
* transaction.
*/
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(TransactionId);
hash_ctl.entrysize = sizeof(RecoveryLockListsEntry);
RecoveryLockLists = hash_create("RecoveryLockLists",


@ -419,7 +419,6 @@ InitLocks(void)
* Allocate hash table for LOCK structs. This stores per-locked-object
* information.
*/
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(LOCKTAG);
info.entrysize = sizeof(LOCK);
info.num_partitions = NUM_LOCK_PARTITIONS;


@ -342,7 +342,6 @@ init_lwlock_stats(void)
ALLOCSET_DEFAULT_SIZES);
MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(lwlock_stats_key);
ctl.entrysize = sizeof(lwlock_stats);
ctl.hcxt = lwlock_stats_cxt;


@ -1096,7 +1096,6 @@ InitPredicateLocks(void)
* Allocate hash table for PREDICATELOCKTARGET structs. This stores
* per-predicate-lock-target information.
*/
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(PREDICATELOCKTARGETTAG);
info.entrysize = sizeof(PREDICATELOCKTARGET);
info.num_partitions = NUM_PREDICATELOCK_PARTITIONS;
@ -1129,7 +1128,6 @@ InitPredicateLocks(void)
* Allocate hash table for PREDICATELOCK structs. This stores per
* xact-lock-of-a-target information.
*/
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(PREDICATELOCKTAG);
info.entrysize = sizeof(PREDICATELOCK);
info.hash = predicatelock_hash;
@ -1212,7 +1210,6 @@ InitPredicateLocks(void)
* Allocate hash table for SERIALIZABLEXID structs. This stores per-xid
* information for serializable transactions which have accessed data.
*/
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(SERIALIZABLEXIDTAG);
info.entrysize = sizeof(SERIALIZABLEXID);
@ -1853,7 +1850,6 @@ CreateLocalPredicateLockHash(void)
/* Initialize the backend-local hash table of parent locks */
Assert(LocalPredicateLockHash == NULL);
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(PREDICATELOCKTARGETTAG);
hash_ctl.entrysize = sizeof(LOCALPREDICATELOCK);
LocalPredicateLockHash = hash_create("Local predicate lock",


@ -154,7 +154,6 @@ smgropen(RelFileNode rnode, BackendId backend)
/* First time through: initialize the hash table */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(RelFileNodeBackend);
ctl.entrysize = sizeof(SMgrRelationData);
SMgrRelationHash = hash_create("smgr relation table", 400,


@ -150,7 +150,6 @@ InitSync(void)
ALLOCSET_DEFAULT_SIZES);
MemoryContextAllowInCriticalSection(pendingOpsCxt, true);
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(FileTag);
hash_ctl.entrysize = sizeof(PendingFsyncEntry);
hash_ctl.hcxt = pendingOpsCxt;


@ -180,7 +180,6 @@ compute_tsvector_stats(VacAttrStats *stats,
* worry about overflowing the initial size. Also we don't need to pay any
* attention to locking and memory management.
*/
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(LexemeHashKey);
hash_ctl.entrysize = sizeof(TrackItem);
hash_ctl.hash = lexeme_hash;


@ -277,7 +277,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* worry about overflowing the initial size. Also we don't need to pay any
* attention to locking and memory management.
*/
MemSet(&elem_hash_ctl, 0, sizeof(elem_hash_ctl));
elem_hash_ctl.keysize = sizeof(Datum);
elem_hash_ctl.entrysize = sizeof(TrackItem);
elem_hash_ctl.hash = element_hash;
@ -289,7 +288,6 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
/* hashtable for array distinct elements counts */
MemSet(&count_hash_ctl, 0, sizeof(count_hash_ctl));
count_hash_ctl.keysize = sizeof(int);
count_hash_ctl.entrysize = sizeof(DECountItem);
count_hash_ctl.hcxt = CurrentMemoryContext;


@ -3439,14 +3439,13 @@ get_json_object_as_hash(char *json, int len, const char *funcname)
JsonLexContext *lex = makeJsonLexContextCstringLen(json, len, GetDatabaseEncoding(), true);
JsonSemAction *sem;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = NAMEDATALEN;
ctl.entrysize = sizeof(JsonHashEntry);
ctl.hcxt = CurrentMemoryContext;
tab = hash_create("json object hashtable",
100,
&ctl,
HASH_ELEM | HASH_CONTEXT);
HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
state = palloc0(sizeof(JHashState));
sem = palloc0(sizeof(JsonSemAction));
@ -3831,14 +3830,13 @@ populate_recordset_object_start(void *state)
return;
/* Object at level 1: set up a new hash table for this object */
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = NAMEDATALEN;
ctl.entrysize = sizeof(JsonHashEntry);
ctl.hcxt = CurrentMemoryContext;
_state->json_hash = hash_create("json object hashtable",
100,
&ctl,
HASH_ELEM | HASH_CONTEXT);
HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
}
static void


@ -1297,7 +1297,6 @@ lookup_collation_cache(Oid collation, bool set_flags)
/* First time through, initialize the hash table */
HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(collation_cache_entry);
collation_cache = hash_create("Collation cache", 100, &ctl,


@ -2540,7 +2540,6 @@ ri_InitHashTables(void)
{
HASHCTL ctl;
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(RI_ConstraintInfo);
ri_constraint_cache = hash_create("RI constraint cache",
@ -2552,14 +2551,12 @@ ri_InitHashTables(void)
InvalidateConstraintCacheCallBack,
(Datum) 0);
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(RI_QueryKey);
ctl.entrysize = sizeof(RI_QueryHashEntry);
ri_query_cache = hash_create("RI query cache",
RI_INIT_QUERYHASHSIZE,
&ctl, HASH_ELEM | HASH_BLOBS);
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(RI_CompareKey);
ctl.entrysize = sizeof(RI_CompareHashEntry);
ri_compare_cache = hash_create("RI compare cache",


@ -3464,14 +3464,14 @@ set_rtable_names(deparse_namespace *dpns, List *parent_namespaces,
* We use a hash table to hold known names, so that this process is O(N)
* not O(N^2) for N names.
*/
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = NAMEDATALEN;
hash_ctl.entrysize = sizeof(NameHashEntry);
hash_ctl.hcxt = CurrentMemoryContext;
names_hash = hash_create("set_rtable_names names",
list_length(dpns->rtable),
&hash_ctl,
HASH_ELEM | HASH_CONTEXT);
HASH_ELEM | HASH_STRINGS | HASH_CONTEXT);
/* Preload the hash table with names appearing in parent_namespaces */
foreach(lc, parent_namespaces)
{


@ -79,7 +79,6 @@ InitializeAttoptCache(void)
HASHCTL ctl;
/* Initialize the hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(AttoptCacheKey);
ctl.entrysize = sizeof(AttoptCacheEntry);
AttoptCacheHash =


@ -118,7 +118,6 @@ BuildEventTriggerCache(void)
EventTriggerCacheState = ETCS_REBUILD_STARTED;
/* Create new hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(EventTriggerEvent);
ctl.entrysize = sizeof(EventTriggerCacheEntry);
ctl.hcxt = EventTriggerCacheContext;


@ -1607,7 +1607,6 @@ LookupOpclassInfo(Oid operatorClassOid,
/* First time through: initialize the opclass cache */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(OpClassCacheEnt);
OpClassCache = hash_create("Operator class cache", 64,
@ -3775,7 +3774,6 @@ RelationCacheInitialize(void)
/*
* create hashtable that indexes the relcache
*/
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(RelIdCacheEnt);
RelationIdCache = hash_create("Relcache by OID", INITRELCACHESIZE,


@ -110,17 +110,15 @@ InitializeRelfilenodeMap(void)
relfilenode_skey[0].sk_attno = Anum_pg_class_reltablespace;
relfilenode_skey[1].sk_attno = Anum_pg_class_relfilenode;
/* Initialize the hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(RelfilenodeMapKey);
ctl.entrysize = sizeof(RelfilenodeMapEntry);
ctl.hcxt = CacheMemoryContext;
/*
* Only create the RelfilenodeMapHash now, so we don't end up partially
* initialized when fmgr_info_cxt() above ERRORs out with an out of memory
* error.
*/
ctl.keysize = sizeof(RelfilenodeMapKey);
ctl.entrysize = sizeof(RelfilenodeMapEntry);
ctl.hcxt = CacheMemoryContext;
RelfilenodeMapHash =
hash_create("RelfilenodeMap cache", 64, &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);


@ -79,7 +79,6 @@ InitializeTableSpaceCache(void)
HASHCTL ctl;
/* Initialize the hash table. */
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(TableSpaceCacheEntry);
TableSpaceCacheHash =


@ -117,7 +117,6 @@ lookup_ts_parser_cache(Oid prsId)
/* First time through: initialize the hash table */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(TSParserCacheEntry);
TSParserCacheHash = hash_create("Tsearch parser cache", 4,
@ -215,7 +214,6 @@ lookup_ts_dictionary_cache(Oid dictId)
/* First time through: initialize the hash table */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(TSDictionaryCacheEntry);
TSDictionaryCacheHash = hash_create("Tsearch dictionary cache", 8,
@ -365,7 +363,6 @@ init_ts_config_cache(void)
{
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(TSConfigCacheEntry);
TSConfigCacheHash = hash_create("Tsearch configuration cache", 16,


@ -341,7 +341,6 @@ lookup_type_cache(Oid type_id, int flags)
/* First time through: initialize the hash table */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(TypeCacheEntry);
TypeCacheHash = hash_create("Type information cache", 64,
@ -1874,7 +1873,6 @@ assign_record_type_typmod(TupleDesc tupDesc)
/* First time through: initialize the hash table */
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(TupleDesc); /* just the pointer */
ctl.entrysize = sizeof(RecordCacheEntry);
ctl.hash = record_type_typmod_hash;


@ -680,13 +680,12 @@ find_rendezvous_variable(const char *varName)
{
HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
ctl.keysize = NAMEDATALEN;
ctl.entrysize = sizeof(rendezvousHashEntry);
rendezvousHash = hash_create("Rendezvous variable hash",
16,
&ctl,
HASH_ELEM);
HASH_ELEM | HASH_STRINGS);
}
/* Find or create the hashtable entry for this varName */


@ -565,7 +565,6 @@ record_C_func(HeapTuple procedureTuple,
{
HASHCTL hash_ctl;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(CFuncHashTabEntry);
CFuncHash = hash_create("CFuncHash",


@ -30,11 +30,12 @@
* dynahash.c provides support for these types of lookup keys:
*
* 1. Null-terminated C strings (truncated if necessary to fit in keysize),
* compared as though by strcmp(). This is the default behavior.
* compared as though by strcmp(). This is selected by specifying the
* HASH_STRINGS flag to hash_create.
*
* 2. Arbitrary binary data of size keysize, compared as though by memcmp().
* (Caller must ensure there are no undefined padding bits in the keys!)
* This is selected by specifying HASH_BLOBS flag to hash_create.
* This is selected by specifying the HASH_BLOBS flag to hash_create.
*
* 3. More complex key behavior can be selected by specifying user-supplied
* hashing, comparison, and/or key-copying functions. At least a hashing
@ -47,8 +48,8 @@
* locks.
* - Shared memory hashes are allocated in a fixed size area at startup and
* are discoverable by name from other processes.
* - Because entries don't need to be moved in the case of hash conflicts, has
* better performance for large entries
* - Because entries don't need to be moved in the case of hash conflicts,
* dynahash has better performance for large entries.
* - Guarantees stable pointers to entries.
*
* Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
@ -316,6 +317,28 @@ string_compare(const char *key1, const char *key2, Size keysize)
* *info: additional table parameters, as indicated by flags
* flags: bitmask indicating which parameters to take from *info
*
* The flags value *must* include HASH_ELEM. (Formerly, this was nominally
* optional, but the default keysize and entrysize values were useless.)
* The flags value must also include exactly one of HASH_STRINGS, HASH_BLOBS,
* or HASH_FUNCTION, to define the key hashing semantics (C strings,
* binary blobs, or custom, respectively). Callers specifying a custom
* hash function will likely also want to use HASH_COMPARE, and perhaps
* also HASH_KEYCOPY, to control key comparison and copying.
* Another often-used flag is HASH_CONTEXT, to allocate the hash table
* under info->hcxt rather than under TopMemoryContext; the default
* behavior is only suitable for session-lifespan hash tables.
* Other flags bits are special-purpose and seldom used, except for those
* associated with shared-memory hash tables, for which see ShmemInitHash().
*
* Fields in *info are read only when the associated flags bit is set.
* It is not necessary to initialize other fields of *info.
* Neither tabname nor *info need persist after the hash_create() call.
*
* Note: It is deprecated for callers of hash_create() to explicitly specify
* string_hash, tag_hash, uint32_hash, or oid_hash. Just set HASH_STRINGS or
* HASH_BLOBS. Use HASH_FUNCTION only when you want something other than
* one of these.
*
* Note: for a shared-memory hashtable, nelem needs to be a pretty good
* estimate, since we can't expand the table on the fly. But an unshared
* hashtable can be expanded on-the-fly, so it's better for nelem to be
@ -323,11 +346,19 @@ string_compare(const char *key1, const char *key2, Size keysize)
* large nelem will penalize hash_seq_search speed without buying much.
*/
HTAB *
hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
{
HTAB *hashp;
HASHHDR *hctl;
/*
* Hash tables now allocate space for key and data, but you have to say
* how much space to allocate.
*/
Assert(flags & HASH_ELEM);
Assert(info->keysize > 0);
Assert(info->entrysize >= info->keysize);
/*
* For shared hash tables, we have a local hash header (HTAB struct) that
* we allocate in TopMemoryContext; all else is in shared memory.
@ -370,28 +401,43 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
* Select the appropriate hash function (see comments at head of file).
*/
if (flags & HASH_FUNCTION)
{
Assert(!(flags & (HASH_BLOBS | HASH_STRINGS)));
hashp->hash = info->hash;
}
else if (flags & HASH_BLOBS)
{
Assert(!(flags & HASH_STRINGS));
/* We can optimize hashing for common key sizes */
Assert(flags & HASH_ELEM);
if (info->keysize == sizeof(uint32))
hashp->hash = uint32_hash;
else
hashp->hash = tag_hash;
}
else
hashp->hash = string_hash; /* default hash function */
{
/*
* string_hash used to be considered the default hash method, and in a
* non-assert build it effectively still is. But we now consider it
* an assertion error to not say HASH_STRINGS explicitly. To help
* catch mistaken usage of HASH_STRINGS, we also insist on a
* reasonably long string length: if the keysize is only 4 or 8 bytes,
* it's almost certainly an integer or pointer not a string.
*/
Assert(flags & HASH_STRINGS);
Assert(info->keysize > 8);
hashp->hash = string_hash;
}
/*
* If you don't specify a match function, it defaults to string_compare if
* you used string_hash (either explicitly or by default) and to memcmp
* otherwise.
* you used string_hash, and to memcmp otherwise.
*
* Note: explicitly specifying string_hash is deprecated, because this
* might not work for callers in loadable modules on some platforms due to
* referencing a trampoline instead of the string_hash function proper.
* Just let it default, eh?
* Specify HASH_STRINGS instead.
*/
if (flags & HASH_COMPARE)
hashp->match = info->match;
@ -505,16 +551,9 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
hctl->dsize = info->dsize;
}
/*
* hash table now allocates space for key and data but you have to say how
* much space to allocate
*/
if (flags & HASH_ELEM)
{
Assert(info->entrysize >= info->keysize);
hctl->keysize = info->keysize;
hctl->entrysize = info->entrysize;
}
/* remember the entry sizes, too */
hctl->keysize = info->keysize;
hctl->entrysize = info->entrysize;
/* make local copies of heavily-used constant fields */
hashp->keysize = hctl->keysize;
@ -593,10 +632,6 @@ hdefault(HTAB *hashp)
hctl->dsize = DEF_DIRSIZE;
hctl->nsegs = 0;
/* rather pointless defaults for key & entry size */
hctl->keysize = sizeof(char *);
hctl->entrysize = 2 * sizeof(char *);
hctl->num_partitions = 0; /* not partitioned */
/* table has no fixed maximum size */


@ -119,7 +119,7 @@ EnablePortalManager(void)
* create, initially
*/
PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
&ctl, HASH_ELEM);
&ctl, HASH_ELEM | HASH_STRINGS);
}
/*


@ -223,7 +223,6 @@ GetComboCommandId(CommandId cmin, CommandId cmax)
sizeComboCids = CCID_ARRAY_SIZE;
usedComboCids = 0;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(ComboCidKeyData);
hash_ctl.entrysize = sizeof(ComboCidEntryData);
hash_ctl.hcxt = TopTransactionContext;


@ -64,25 +64,36 @@ typedef struct HTAB HTAB;
/* Only those fields indicated by hash_flags need be set */
typedef struct HASHCTL
{
/* Used if HASH_PARTITION flag is set: */
long num_partitions; /* # partitions (must be power of 2) */
/* Used if HASH_SEGMENT flag is set: */
long ssize; /* segment size */
/* Used if HASH_DIRSIZE flag is set: */
long dsize; /* (initial) directory size */
long max_dsize; /* limit to dsize if dir size is limited */
/* Used if HASH_ELEM flag is set (which is now required): */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
/* Used if HASH_FUNCTION flag is set: */
HashValueFunc hash; /* hash function */
/* Used if HASH_COMPARE flag is set: */
HashCompareFunc match; /* key comparison function */
/* Used if HASH_KEYCOPY flag is set: */
HashCopyFunc keycopy; /* key copying function */
/* Used if HASH_ALLOC flag is set: */
HashAllocFunc alloc; /* memory allocator */
/* Used if HASH_CONTEXT flag is set: */
MemoryContext hcxt; /* memory context to use for allocations */
/* Used if HASH_SHARED_MEM flag is set: */
HASHHDR *hctl; /* location of header in shared mem */
} HASHCTL;
/* Flags to indicate which parameters are supplied */
/* Flag bits for hash_create; most indicate which parameters are supplied */
#define HASH_PARTITION 0x0001 /* Hashtable is used w/partitioned locking */
#define HASH_SEGMENT 0x0002 /* Set segment size */
#define HASH_DIRSIZE 0x0004 /* Set directory size (initial and max) */
#define HASH_ELEM 0x0010 /* Set keysize and entrysize */
#define HASH_ELEM 0x0008 /* Set keysize and entrysize (now required!) */
#define HASH_STRINGS 0x0010 /* Select support functions for string keys */
#define HASH_BLOBS 0x0020 /* Select support functions for binary keys */
#define HASH_FUNCTION 0x0040 /* Set user defined hash function */
#define HASH_COMPARE 0x0080 /* Set user defined comparison function */
@ -93,7 +104,6 @@ typedef struct HASHCTL
#define HASH_ATTACH 0x1000 /* Do not initialize hctl */
#define HASH_FIXED_SIZE 0x2000 /* Initial size is a hard limit */
/* max_dsize value to indicate expansible directory */
#define NO_MAX_DSIZE (-1)
@ -116,13 +126,9 @@ typedef struct
/*
* prototypes for functions in dynahash.c
*
* Note: It is deprecated for callers of hash_create to explicitly specify
* string_hash, tag_hash, uint32_hash, or oid_hash. Just set HASH_BLOBS or
* not. Use HASH_FUNCTION only when you want something other than those.
*/
extern HTAB *hash_create(const char *tabname, long nelem,
HASHCTL *info, int flags);
const HASHCTL *info, int flags);
extern void hash_destroy(HTAB *hashp);
extern void hash_stats(const char *where, HTAB *hashp);
extern void *hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action,


@ -458,7 +458,6 @@ _PG_init(void)
/*
* Create hash tables.
*/
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(plperl_interp_desc);
plperl_interp_hash = hash_create("PL/Perl interpreters",
@ -466,7 +465,6 @@ _PG_init(void)
&hash_ctl,
HASH_ELEM | HASH_BLOBS);
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(plperl_proc_key);
hash_ctl.entrysize = sizeof(plperl_proc_ptr);
plperl_proc_hash = hash_create("PL/Perl procedures",
@ -580,13 +578,12 @@ select_perl_context(bool trusted)
{
HASHCTL hash_ctl;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = NAMEDATALEN;
hash_ctl.entrysize = sizeof(plperl_query_entry);
interp_desc->query_hash = hash_create("PL/Perl queries",
32,
&hash_ctl,
HASH_ELEM);
HASH_ELEM | HASH_STRINGS);
}
/*


@ -2567,7 +2567,6 @@ plpgsql_HashTableInit(void)
/* don't allow double-initialization */
Assert(plpgsql_HashTable == NULL);
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(PLpgSQL_func_hashkey);
ctl.entrysize = sizeof(plpgsql_HashEnt);
plpgsql_HashTable = hash_create("PLpgSQL function hash",


@ -4058,7 +4058,6 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
{
estate->simple_eval_estate = simple_eval_estate;
/* Private cast hash just lives in function's main context */
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(plpgsql_CastHashKey);
ctl.entrysize = sizeof(plpgsql_CastHashEntry);
ctl.hcxt = CurrentMemoryContext;
@ -4077,7 +4076,6 @@ plpgsql_estate_setup(PLpgSQL_execstate *estate,
shared_cast_context = AllocSetContextCreate(TopMemoryContext,
"PLpgSQL cast info",
ALLOCSET_DEFAULT_SIZES);
memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(plpgsql_CastHashKey);
ctl.entrysize = sizeof(plpgsql_CastHashEntry);
ctl.hcxt = shared_cast_context;


@ -214,7 +214,6 @@ PLy_add_exceptions(PyObject *plpy)
PLy_exc_spi_error = PLy_create_exception("plpy.SPIError", NULL, NULL,
"SPIError", plpy);
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(int);
hash_ctl.entrysize = sizeof(PLyExceptionEntry);
PLy_spi_exceptions = hash_create("PL/Python SPI exceptions", 256,


@ -34,7 +34,6 @@ init_procedure_caches(void)
{
HASHCTL hash_ctl;
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(PLyProcedureKey);
hash_ctl.entrysize = sizeof(PLyProcedureEntry);
PLy_procedure_cache = hash_create("PL/Python procedures", 32, &hash_ctl,


@ -439,7 +439,6 @@ _PG_init(void)
/************************************************************
* Create the hash table for working interpreters
************************************************************/
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(pltcl_interp_desc);
pltcl_interp_htab = hash_create("PL/Tcl interpreters",
@ -450,7 +449,6 @@ _PG_init(void)
/************************************************************
* Create the hash table for function lookup
************************************************************/
memset(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = sizeof(pltcl_proc_key);
hash_ctl.entrysize = sizeof(pltcl_proc_ptr);
pltcl_proc_htab = hash_create("PL/Tcl functions",


@ -203,15 +203,13 @@ init_timezone_hashtable(void)
{
HASHCTL hash_ctl;
MemSet(&hash_ctl, 0, sizeof(hash_ctl));
hash_ctl.keysize = TZ_STRLEN_MAX + 1;
hash_ctl.entrysize = sizeof(pg_tz_cache);
timezone_cache = hash_create("Timezones",
4,
&hash_ctl,
HASH_ELEM);
HASH_ELEM | HASH_STRINGS);
if (!timezone_cache)
return false;