*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.48 2000/01/26 05:56:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.49 2000/02/26 05:25:55 tgl Exp $
*
*-------------------------------------------------------------------------
*/
HTAB *
ShmemInitHash(char *name, /* table string name for shmem index */
long init_size, /* initial table size */
- long max_size, /* max size of the table (NOT USED) */
+ long max_size, /* max size of the table */
HASHCTL *infoP, /* info about key and bucket size */
int hash_flags) /* info about infoP */
{
long *location;
/*
- * Hash tables allocated in shared memory have a fixed directory; it
- * can't grow or other backends wouldn't be able to find it. The
- * segbase is for calculating pointer values. The shared memory
+ * Hash tables allocated in shared memory have a fixed directory;
+ * it can't grow or other backends wouldn't be able to find it.
+ * So, make sure we make it big enough to start with.
+ *
+ * The segbase is for calculating pointer values. The shared memory
* allocator must be specified too.
*/
- infoP->dsize = infoP->max_dsize = DEF_DIRSIZE;
+ infoP->dsize = infoP->max_dsize = hash_select_dirsize(max_size);
infoP->segbase = (long *) ShmemBase;
infoP->alloc = ShmemAlloc;
hash_flags |= HASH_SHARED_MEM | HASH_DIRSIZE;
/* look it up in the shmem index */
location = ShmemInitStruct(name,
- sizeof(HHDR) + DEF_DIRSIZE * sizeof(SEG_OFFSET),
+ sizeof(HHDR) + infoP->dsize * sizeof(SEG_OFFSET),
&found);
/*
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.28 2000/01/26 05:57:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.29 2000/02/26 05:25:54 tgl Exp $
*
*-------------------------------------------------------------------------
*/
{
*segp = seg_alloc(hashp);
if (*segp == (SEG_OFFSET) 0)
- {
- hash_destroy(hashp);
- return 0;
- }
+ return -1;
}
#if HASH_DEBUG
return size;
}
+/*
+ * Select an appropriate directory size for a hashtable with the given
+ * maximum number of entries.
+ * This is only needed for hashtables in shared memory, whose directories
+ * cannot be expanded dynamically.
+ * NB: assumes that all hash structure parameters have default values!
+ *
+ * Returns a power-of-2 directory entry count, never less than DEF_DIRSIZE,
+ * large enough to cover the segments implied by num_entries.
+ *
+ * XXX this had better agree with the behavior of init_htab()...
+ */
+long
+hash_select_dirsize(long num_entries)
+{
+	long		nBuckets,
+				nSegments,
+				nDirEntries;
+
+	/* estimate number of buckets wanted */
+	nBuckets = 1L << my_log2((num_entries - 1) / DEF_FFACTOR + 1);
+	/* # of segments needed for nBuckets */
+	nSegments = 1L << my_log2((nBuckets - 1) / DEF_SEGSIZE + 1);
+	/* directory entries */
+	nDirEntries = DEF_DIRSIZE;
+	while (nDirEntries < nSegments)
+		nDirEntries <<= 1;	/* dir_alloc doubles dsize at each call */
+
+	return nDirEntries;
+}
+
/********************** DESTROY ROUTINES ************************/
/*-------------------------------------------------------------------------
*
* hsearch.h
- * for hashing in the new buffer manager
+ * for hash tables, particularly hash tables in shared memory
*
*
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: hsearch.h,v 1.13 2000/01/26 05:58:38 momjian Exp $
+ * $Id: hsearch.h,v 1.14 2000/02/26 05:25:53 tgl Exp $
*
*-------------------------------------------------------------------------
*/
* whole lot of records per bucket or performance goes down.
*
* In a hash table allocated in shared memory, the directory cannot be
- * expanded because it must stay at a fixed address.
+ * expanded because it must stay at a fixed address. The directory size
+ * should be selected using hash_select_dirsize (and you'd better have
+ * a good idea of the maximum number of entries!). For non-shared hash
+ * tables, the initial directory size can be left at the default.
*/
#define DEF_SEGSIZE 256
-#define DEF_SEGSIZE_SHIFT 8/* log2(SEGSIZE) */
+#define DEF_SEGSIZE_SHIFT 8 /* must be log2(DEF_SEGSIZE) */
#define DEF_DIRSIZE 256
-#define DEF_FFACTOR 1/* default fill factor */
+#define DEF_FFACTOR 1 /* default fill factor */
#define PRIME1 37 /* for the hash function */
#define PRIME2 1048583
*/
typedef struct element
{
- unsigned long next; /* secret from user */
+ unsigned long next; /* secret from user */
long key;
} ELEMENT;
typedef unsigned long BUCKET_INDEX;
-/* segment is an array of bucket pointers */
+/* segment is an array of bucket pointers */
typedef BUCKET_INDEX *SEGMENT;
typedef unsigned long SEG_OFFSET;
long nsegs; /* Number of allocated segments */
long keysize; /* hash key length in bytes */
long datasize; /* elem data length in bytes */
- long max_dsize; /* 'dsize' limit if directory is fixed
- * size */
- BUCKET_INDEX freeBucketIndex;
- /* index of first free bucket */
+ long max_dsize; /* 'dsize' limit if directory is fixed size */
+ BUCKET_INDEX freeBucketIndex; /* index of first free bucket */
#ifdef HASH_STATISTICS
long accesses;
long collisions;
SEG_OFFSET *dir; /* 'directory' of segm starts */
long *(*alloc) (); /* memory allocator (long * for alignment
* reasons) */
-
} HTAB;
typedef struct hashctl
#define HASH_ALLOC 0x100 /* Setting memory allocator */
-/* seg_alloc assumes that INVALID_INDEX is 0*/
+/* seg_alloc assumes that INVALID_INDEX is 0 */
#define INVALID_INDEX (0)
#define NO_MAX_DSIZE (-1)
/* number of hash buckets allocated at once */
bool *foundPtr);
extern long *hash_seq(HTAB *hashp);
extern long hash_estimate_size(long num_entries, long keysize, long datasize);
+extern long hash_select_dirsize(long num_entries);
/*
* prototypes from functions in hashfn.c