Path: blob/main/crypto/krb5/src/plugins/kdb/db2/libdb2/hash/hash.c
/*-
 * Copyright (c) 1990, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Margo Seltzer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)hash.c 8.12 (Berkeley) 11/7/95";
#endif /* LIBC_SCCS and not lint */

#include <sys/param.h>
#include <sys/stat.h>

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef DEBUG
#include <assert.h>
#endif

#include "db-int.h"
#include "hash.h"
#include "page.h"
#include "extern.h"

static int32_t flush_meta __P((HTAB *));
static int32_t hash_access __P((HTAB *, ACTION, const DBT *, DBT *));
static int32_t hash_close __P((DB *));
static int32_t hash_delete __P((const DB *, const DBT *, u_int32_t));
static int32_t hash_fd __P((const DB *));
static int32_t hash_get __P((const DB *, const DBT *, DBT *, u_int32_t));
static int32_t hash_put __P((const DB *, DBT *, const DBT *, u_int32_t));
static int32_t hash_seq __P((const DB *, DBT *, DBT *, u_int32_t));
static int32_t hash_sync __P((const DB *, u_int32_t));
static int32_t hdestroy __P((HTAB *));
static int32_t cursor_get __P((const DB *, CURSOR *, DBT *, DBT *, \
    u_int32_t));
static int32_t cursor_delete __P((const DB *, CURSOR *, u_int32_t));
static HTAB *init_hash __P((HTAB *, const char *, const HASHINFO *));
static int32_t init_htab __P((HTAB *, int32_t));
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
static void swap_header __P((HTAB *));
static void swap_header_copy __P((HASHHDR *, HASHHDR *));
#endif
static u_int32_t hget_header __P((HTAB *, u_int32_t));
static void hput_header __P((HTAB *));

#define RETURN_ERROR(ERR, LOC)	{ save_errno = ERR; goto LOC; }

/* Return values */
#define SUCCESS  (0)
#define ERROR   (-1)
#define ABNORMAL (1)

#ifdef HASH_STATISTICS
u_int32_t hash_accesses, hash_collisions, hash_expansions, hash_overflows,
        hash_bigpages;
#endif

/************************** INTERFACE ROUTINES ***************************/
/* OPEN/CLOSE */
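/*
 * Illustrative usage sketch (hypothetical file name, key and value): a
 * caller gets a DB handle from __kdb2_hash_open() and then works through
 * the function pointers filled in below (get/put/del/seq/sync/close).
 * DBT and the R_* flags come from the DB interface headers.
 *
 *	DB *db;
 *	DBT key, val;
 *
 *	db = __kdb2_hash_open("example.db", O_RDWR | O_CREAT, 0600, NULL, 0);
 *	if (db == NULL)
 *		return (1);
 *	key.data = "alice";
 *	key.size = 5;
 *	val.data = "wonderland";
 *	val.size = 10;
 *	if (db->put(db, &key, &val, R_NOOVERWRITE) == 0 &&
 *	    db->get(db, &key, &val, 0) == 0)
 *		printf("%.*s\n", (int)val.size, (char *)val.data);
 *	(void)db->close(db);
 */
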
extern DB *
__kdb2_hash_open(const char *file, int flags, int mode, const HASHINFO *info,
    int dflags)
{
        struct stat statbuf;
        DB *dbp;
        DBT mpool_key;
        HTAB *hashp;
        int32_t bpages, csize, new_table, save_errno;

        if (!file || (flags & O_ACCMODE) == O_WRONLY) {
                errno = EINVAL;
                return (NULL);
        }
        if (!(hashp = (HTAB *)calloc(1, sizeof(HTAB))))
                return (NULL);
        hashp->fp = -1;
        /*
         * Even if user wants write only, we need to be able to read
         * the actual file, so we need to open it read/write.  But, the
         * field in the hashp structure needs to be accurate so that
         * we can check accesses.
         */
        hashp->flags = flags;
        hashp->save_file = hashp->flags & O_RDWR;

        new_table = 0;
        if (!file || (flags & O_TRUNC) ||
            (stat(file, &statbuf) && (errno == ENOENT))) {
                if (errno == ENOENT)
                        errno = 0;      /* In case someone looks at errno. */
                new_table = 1;
        }
        if (file) {
                if ((hashp->fp = open(file, flags|O_BINARY, mode)) == -1)
                        RETURN_ERROR(errno, error0);
                (void)fcntl(hashp->fp, F_SETFD, 1);
        }

        /* Process arguments to set up hash table header. */
        if (new_table) {
                if (!(hashp = init_hash(hashp, file, info)))
                        RETURN_ERROR(errno, error1);
        } else {
                /* Table already exists */
                if (info && info->hash)
                        hashp->hash = info->hash;
                else
                        hashp->hash = __default_hash;

                /* copy metadata from page into header */
                if (hget_header(hashp,
                    (info && info->bsize ? info->bsize : DEF_BUCKET_SIZE)) !=
                    sizeof(HASHHDR))
                        RETURN_ERROR(EFTYPE, error1);

                /* Verify file type, versions and hash function */
                if (hashp->hdr.magic != HASHMAGIC)
                        RETURN_ERROR(EFTYPE, error1);
#define OLDHASHVERSION 1
                if (hashp->hdr.version != HASHVERSION &&
                    hashp->hdr.version != OLDHASHVERSION)
                        RETURN_ERROR(EFTYPE, error1);
                if (hashp->hash(CHARKEY, sizeof(CHARKEY))
                    != hashp->hdr.h_charkey)
                        RETURN_ERROR(EFTYPE, error1);
                /*
                 * Figure out how many segments we need.  Max_Bucket is the
                 * maximum bucket number, so the number of buckets is
                 * max_bucket + 1.
                 */

                /* Read in bitmaps */
                bpages = (hashp->hdr.spares[hashp->hdr.ovfl_point] +
                    (hashp->hdr.bsize << BYTE_SHIFT) - 1) >>
                    (hashp->hdr.bshift + BYTE_SHIFT);

                hashp->nmaps = bpages;
                (void)memset(&hashp->mapp[0], 0, bpages * sizeof(u_int32_t *));
        }
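        /*
         * From here on, bucket and overflow pages are read and written
         * through the mpool buffer cache set up below: it holds roughly
         * (cachesize / bsize) page buffers, and the __pgin_routine /
         * __pgout_routine filters are applied as pages move between
         * memory and disk.
         */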
        /* start up mpool */
        mpool_key.data = (u_int8_t *)file;
        mpool_key.size = strlen(file);

        if (info && info->cachesize)
                csize = info->cachesize / hashp->hdr.bsize;
        else
                csize = DEF_CACHESIZE / hashp->hdr.bsize;
        hashp->mp = mpool_open(&mpool_key, hashp->fp, hashp->hdr.bsize, csize);

        if (!hashp->mp)
                RETURN_ERROR(errno, error1);
        mpool_filter(hashp->mp, __pgin_routine, __pgout_routine, hashp);

        /*
         * For a new table, set up the bitmaps.
         */
        if (new_table &&
            init_htab(hashp, info && info->nelem ? info->nelem : 1))
                goto error2;

        /* initialize the cursor queue */
        TAILQ_INIT(&hashp->curs_queue);
        hashp->seq_cursor = NULL;


        /* get a chunk of memory for our split buffer */
        hashp->split_buf = (PAGE16 *)malloc(hashp->hdr.bsize);
        if (!hashp->split_buf)
                goto error2;

        hashp->new_file = new_table;

        if (!(dbp = (DB *)malloc(sizeof(DB))))
                goto error2;

        dbp->internal = hashp;
        dbp->close = hash_close;
        dbp->del = hash_delete;
        dbp->fd = hash_fd;
        dbp->get = hash_get;
        dbp->put = hash_put;
        dbp->seq = hash_seq;
        dbp->sync = hash_sync;
        dbp->type = DB_HASH;

#ifdef DEBUG
        (void)fprintf(stderr,
            "%s\n%s%lx\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
            "init_htab:",
            "TABLE POINTER ", (void *)hashp,
            "BUCKET SIZE ", hashp->hdr.bsize,
            "BUCKET SHIFT ", hashp->hdr.bshift,
            "FILL FACTOR ", hashp->hdr.ffactor,
            "MAX BUCKET ", hashp->hdr.max_bucket,
            "OVFL POINT ", hashp->hdr.ovfl_point,
            "LAST FREED ", hashp->hdr.last_freed,
            "HIGH MASK ", hashp->hdr.high_mask,
            "LOW MASK ", hashp->hdr.low_mask,
            "NKEYS ", hashp->hdr.nkeys);
#endif
#ifdef HASH_STATISTICS
        hash_overflows = hash_accesses = hash_collisions = hash_expansions = 0;
        hash_bigpages = 0;
#endif
        return (dbp);

error2:
        save_errno = errno;
        hdestroy(hashp);
        errno = save_errno;
        return (NULL);

error1:
        if (hashp != NULL)
                (void)close(hashp->fp);

error0:
        free(hashp);
        errno = save_errno;
        return (NULL);
}

static int32_t
hash_close(DB *dbp)
{
        HTAB *hashp;
        int32_t retval;

        if (!dbp)
                return (ERROR);

        hashp = (HTAB *)dbp->internal;
        retval = hdestroy(hashp);
        free(dbp);
        return (retval);
}

static int32_t
hash_fd(const DB *dbp)
{
        HTAB *hashp;

        if (!dbp)
                return (ERROR);

        hashp = (HTAB *)dbp->internal;
        if (hashp->fp == -1) {
                errno = ENOENT;
                return (-1);
        }
        return (hashp->fp);
}

/************************** LOCAL CREATION ROUTINES **********************/
static HTAB *
init_hash(HTAB *hashp, const char *file, const HASHINFO *info)
{
        struct stat statbuf;

        hashp->hdr.nkeys = 0;
        hashp->hdr.lorder = DB_BYTE_ORDER;
        hashp->hdr.bsize = DEF_BUCKET_SIZE;
        hashp->hdr.bshift = DEF_BUCKET_SHIFT;
        hashp->hdr.ffactor = DEF_FFACTOR;
        hashp->hash = __default_hash;
        memset(hashp->hdr.spares, 0, sizeof(hashp->hdr.spares));
        memset(hashp->hdr.bitmaps, 0, sizeof(hashp->hdr.bitmaps));

        /* Fix bucket size to be optimal for file system */
        if (file != NULL) {
                if (stat(file, &statbuf))
                        return (NULL);
                hashp->hdr.bsize = statbuf.st_blksize;
                if (hashp->hdr.bsize > MAX_BSIZE)
                        hashp->hdr.bsize = MAX_BSIZE;
                hashp->hdr.bshift = __log2(hashp->hdr.bsize);
        }
        if (info) {
                if (info->bsize) {
                        /* Round pagesize up to power of 2 */
                        hashp->hdr.bshift = __log2(info->bsize);
                        hashp->hdr.bsize = 1 << hashp->hdr.bshift;
                        if (hashp->hdr.bsize > MAX_BSIZE) {
                                errno = EINVAL;
                                return (NULL);
                        }
                }
                if (info->ffactor)
                        hashp->hdr.ffactor = info->ffactor;
                if (info->hash)
                        hashp->hash = info->hash;
                if (info->lorder) {
                        if ((info->lorder != DB_BIG_ENDIAN) &&
                            (info->lorder != DB_LITTLE_ENDIAN)) {
                                errno = EINVAL;
                                return (NULL);
                        }
                        hashp->hdr.lorder = info->lorder;
                }
        }
        return (hashp);
}

/*
 * Returns 0 on No Error
 */
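/*
 * Worked example (hypothetical parameters): with nelem = 1000 and
 * ffactor = 8, the code below computes 125 desired buckets, rounds up
 * to l2 = 7 and nbuckets = 128, and so the new table starts with
 * max_bucket = low_mask = 127 (0x7f) and high_mask = 255 (0xff).
 */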
static int32_t
init_htab(HTAB *hashp, int32_t nelem)
{
        int32_t l2, nbuckets;

        /*
         * Divide number of elements by the fill factor and determine a
         * desired number of buckets.  Allocate space for the next greater
         * power of two number of buckets.
         */
        nelem = (nelem - 1) / hashp->hdr.ffactor + 1;

        l2 = __log2(MAX(nelem, 2));
        nbuckets = 1 << l2;

        hashp->hdr.spares[l2] = l2 + 1;
        hashp->hdr.spares[l2 + 1] = l2 + 1;
        hashp->hdr.ovfl_point = l2;
        hashp->hdr.last_freed = 2;

        hashp->hdr.max_bucket = hashp->hdr.low_mask = nbuckets - 1;
        hashp->hdr.high_mask = (nbuckets << 1) - 1;

        /*
         * The number of header pages is the size of the header divided by
         * the amount of freespace on header pages (the page size - the
         * size of 1 integer where the length of the header info on that
         * page is stored) plus another page if it didn't divide evenly.
         */
        hashp->hdr.hdrpages =
            (sizeof(HASHHDR) / (hashp->hdr.bsize - HEADER_OVERHEAD)) +
            (((sizeof(HASHHDR) % (hashp->hdr.bsize - HEADER_OVERHEAD)) == 0)
            ? 0 : 1);

        /* Create pages for these buckets */
        /*
        for (i = 0; i <= hashp->hdr.max_bucket; i++) {
                if (__new_page(hashp, (u_int32_t)i, A_BUCKET) != 0)
                        return (-1);
        }
        */

        /* First bitmap page is at: splitpoint l2 page offset 1 */
        if (__ibitmap(hashp, OADDR_OF(l2, 1), l2 + 1, 0))
                return (-1);

        return (0);
}

/*
 * Functions to get/put hash header.  We access the file directly.
 */
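/*
 * The header occupies the first hdrpages page(s) of the file and is
 * transferred with lseek()/read()/write() on the raw file descriptor
 * rather than through mpool.  On little-endian hosts it is byte-swapped
 * on every get/put, so the on-disk header is effectively big-endian.
 */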
static u_int32_t
hget_header(HTAB *hashp, u_int32_t page_size)
{
        u_int32_t num_copied;
        u_int8_t *hdr_dest;

        num_copied = 0;

        hdr_dest = (u_int8_t *)&hashp->hdr;

        /*
         * XXX
         * This should not be printing to stderr on a "normal" error case.
         */
        lseek(hashp->fp, 0, SEEK_SET);
        num_copied = read(hashp->fp, hdr_dest, sizeof(HASHHDR));
        if (num_copied != sizeof(HASHHDR)) {
                fprintf(stderr, "hash: could not retrieve header");
                return (0);
        }
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
        swap_header(hashp);
#endif
        return (num_copied);
}

static void
hput_header(HTAB *hashp)
{
        HASHHDR *whdrp;
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
        HASHHDR whdr;
#endif
        u_int32_t num_copied;

        num_copied = 0;

        whdrp = &hashp->hdr;
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
        whdrp = &whdr;
        swap_header_copy(&hashp->hdr, whdrp);
#endif

        lseek(hashp->fp, 0, SEEK_SET);
        num_copied = write(hashp->fp, whdrp, sizeof(HASHHDR));
        if (num_copied != sizeof(HASHHDR))
                (void)fprintf(stderr, "hash: could not write hash header");
        return;
}

/********************** DESTROY/CLOSE ROUTINES ************************/

/*
 * Flushes any changes to the file if necessary and destroys the hashp
 * structure, freeing all allocated space.
 */
static int32_t
hdestroy(HTAB *hashp)
{
        int32_t save_errno;

        save_errno = 0;

#ifdef HASH_STATISTICS
        { int i;
        (void)fprintf(stderr, "hdestroy: accesses %ld collisions %ld\n",
            hash_accesses, hash_collisions);
        (void)fprintf(stderr,
            "hdestroy: expansions %ld\n", hash_expansions);
        (void)fprintf(stderr,
            "hdestroy: overflows %ld\n", hash_overflows);
        (void)fprintf(stderr,
            "hdestroy: big key/data pages %ld\n", hash_bigpages);
        (void)fprintf(stderr,
            "keys %ld maxp %d\n", hashp->hdr.nkeys, hashp->hdr.max_bucket);

        for (i = 0; i < NCACHED; i++)
                (void)fprintf(stderr,
                    "spares[%d] = %d\n", i, hashp->hdr.spares[i]);
        }
#endif

        if (flush_meta(hashp) && !save_errno)
                save_errno = errno;

        /* Free the split page */
        if (hashp->split_buf)
                free(hashp->split_buf);

        /* Free the big key and big data returns */
        if (hashp->bigkey_buf)
                free(hashp->bigkey_buf);
        if (hashp->bigdata_buf)
                free(hashp->bigdata_buf);

        /* XXX This should really iterate over the cursor queue, but
           it's not clear how to do that, and the only cursor a hash
           table ever creates is the one used by hash_seq().  Passing
           NULL as the first arg is also a kludge, but I know that
           it's never used, so I do it.  The intent is to plug the
           memory leak.  Correctness can come later. */

        if (hashp->seq_cursor)
                hashp->seq_cursor->delete(NULL, hashp->seq_cursor, 0);

        /* shut down mpool */
        mpool_sync(hashp->mp);
        mpool_close(hashp->mp);

        if (hashp->fp != -1)
                (void)close(hashp->fp);

        /*
         * *** This may cause problems if hashp->fname is set in any case
         * other than the case that we are generating a temporary file name.
         * Note that the new version of mpool should support temporary
         * files within mpool itself.
         */
        if (hashp->fname && !hashp->save_file) {
#ifdef DEBUG
                fprintf(stderr, "Unlinking file %s.\n", hashp->fname);
#endif
                /* we need to chmod the file to allow it to be deleted... */
                chmod(hashp->fname, 0700);
                unlink(hashp->fname);
        }
        free(hashp);

        if (save_errno) {
                errno = save_errno;
                return (ERROR);
        }
        return (SUCCESS);
}

/*
 * Write modified pages to disk
 *
 * Returns:
 *       0 == OK
 *      -1 ERROR
 */
static int32_t
hash_sync(const DB *dbp, u_int32_t flags)
{
        HTAB *hashp;

        hashp = (HTAB *)dbp->internal;

        /*
         * XXX
         * Check success/failure conditions.
         */
        return (flush_meta(hashp) || mpool_sync(hashp->mp));
}

/*
 * Returns:
 *       0 == OK
 *      -1 indicates that errno should be set
 */
static int32_t
flush_meta(HTAB *hashp)
{
        int32_t i;

        if (!hashp->save_file)
                return (0);
        hashp->hdr.magic = HASHMAGIC;
        hashp->hdr.version = HASHVERSION;
        hashp->hdr.h_charkey = hashp->hash(CHARKEY, sizeof(CHARKEY));

        /* write out metadata */
        hput_header(hashp);

        for (i = 0; i < NCACHED; i++)
                if (hashp->mapp[i]) {
                        if (__put_page(hashp,
                            (PAGE16 *)hashp->mapp[i], A_BITMAP, 1))
                                return (-1);
                        hashp->mapp[i] = NULL;
                }
        return (0);
}

/*******************************SEARCH ROUTINES *****************************/
/*
 * All the access routines return
 *
 * Returns:
 *       0 on SUCCESS
 *       1 to indicate an external ERROR (i.e. key not found, etc)
 *      -1 to indicate an internal ERROR (i.e. out of memory, etc)
 */

/* *** make sure this is true! */
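/*
 * hash_get/hash_put/hash_delete validate the flag and the table's access
 * mode and then funnel into hash_access(), which hashes the key with
 * __call_hash(), walks the bucket's page chain with __get_item_next(),
 * and matches keys by memcmp() (or __find_bigpair() for big key/data
 * pairs) before carrying out the requested action.
 */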
static int32_t
hash_get(const DB *dbp, const DBT *key, DBT *data, u_int32_t flag)
{
        HTAB *hashp;

        hashp = (HTAB *)dbp->internal;
        if (flag) {
                hashp->local_errno = errno = EINVAL;
                return (ERROR);
        }
        return (hash_access(hashp, HASH_GET, key, data));
}

static int32_t
hash_put(const DB *dbp, DBT *key, const DBT *data, u_int32_t flag)
{
        HTAB *hashp;

        hashp = (HTAB *)dbp->internal;
        if (flag && flag != R_NOOVERWRITE) {
                hashp->local_errno = errno = EINVAL;
                return (ERROR);
        }
        if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
                hashp->local_errno = errno = EPERM;
                return (ERROR);
        }
        return (hash_access(hashp, flag == R_NOOVERWRITE ?
            HASH_PUTNEW : HASH_PUT, key, (DBT *)data));
}

static int32_t
hash_delete(const DB *dbp, const DBT *key, u_int32_t flag)
{
        HTAB *hashp;

        hashp = (HTAB *)dbp->internal;
        if (flag) {
                hashp->local_errno = errno = EINVAL;
                return (ERROR);
        }
        if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
                hashp->local_errno = errno = EPERM;
                return (ERROR);
        }

        return (hash_access(hashp, HASH_DELETE, key, NULL));
}

/*
 * Assume that hashp has been set in wrapper routine.
 */
static int32_t
hash_access(HTAB *hashp, ACTION action, const DBT *key, DBT *val)
{
        DBT page_key, page_val;
        CURSOR cursor;
        ITEM_INFO item_info;
        u_int32_t bucket;
        u_int32_t num_items;

#ifdef HASH_STATISTICS
        hash_accesses++;
#endif

        num_items = 0;

        /*
         * Set up item_info so that we're looking for space to add an item
         * as we cycle through the pages looking for the key.
         */
        if (action == HASH_PUT || action == HASH_PUTNEW) {
                if (ISBIG(key->size + val->size, hashp))
                        item_info.seek_size = PAIR_OVERHEAD;
                else
                        item_info.seek_size = key->size + val->size;
        } else
                item_info.seek_size = 0;
        item_info.seek_found_page = 0;

        bucket = __call_hash(hashp, (int8_t *)key->data, key->size);

        cursor.pagep = NULL;
        __get_item_reset(hashp, &cursor);

        cursor.bucket = bucket;
        while (1) {
                __get_item_next(hashp, &cursor, &page_key, &page_val, &item_info);
                if (item_info.status == ITEM_ERROR)
                        return (ABNORMAL);
                if (item_info.status == ITEM_NO_MORE)
                        break;
                num_items++;
                if (item_info.key_off == BIGPAIR) {
                        /*
                         * !!!
                         * 0 is a valid index.
                         */
                        if (__find_bigpair(hashp, &cursor, (int8_t *)key->data,
                            key->size) > 0)
                                goto found;
                } else if (key->size == page_key.size &&
                    !memcmp(key->data, page_key.data, key->size))
                        goto found;
        }
#ifdef HASH_STATISTICS
        hash_collisions++;
#endif
        __get_item_done(hashp, &cursor);

        /*
         * At this point, item_info will list either the last page in
         * the chain, or the last page in the chain plus a pgno for where
         * to find the first page in the chain with space for the
         * item we wish to add.
         */

        /* Not found */
        switch (action) {
        case HASH_PUT:
        case HASH_PUTNEW:
                if (__addel(hashp, &item_info, key, val, num_items, 0))
                        return (ERROR);
                break;
        case HASH_GET:
        case HASH_DELETE:
        default:
                return (ABNORMAL);
        }

        if (item_info.caused_expand)
                __expand_table(hashp);
        return (SUCCESS);

found: __get_item_done(hashp, &cursor);

        switch (action) {
        case HASH_PUTNEW:
                /* mpool_put(hashp->mp, pagep, 0); */
                return (ABNORMAL);
        case HASH_GET:
                if (item_info.key_off == BIGPAIR) {
                        if (__big_return(hashp, &item_info, val, 0))
                                return (ERROR);
                } else {
                        val->data = page_val.data;
                        val->size = page_val.size;
                }
                /* *** data may not be available! */
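                /* (val->data points into the shared page buffer rather
                   than storage owned by the caller, so it should be
                   copied before further operations on the table.) */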
                break;
        case HASH_PUT:
                if (__delpair(hashp, &cursor, &item_info) ||
                    __addel(hashp, &item_info, key, val, UNKNOWN, 0))
                        return (ERROR);
                __get_item_done(hashp, &cursor);
                if (item_info.caused_expand)
                        __expand_table(hashp);
                break;
        case HASH_DELETE:
                if (__delpair(hashp, &cursor, &item_info))
                        return (ERROR);
                break;
        default:
                abort();
        }
        return (SUCCESS);
}

/* ****************** CURSORS ********************************** */
CURSOR *
__cursor_creat(const DB *dbp)
{
        CURSOR *new_curs;
        HTAB *hashp;

        new_curs = (CURSOR *)malloc(sizeof(struct cursor_t));
        if (!new_curs)
                return NULL;
        new_curs->internal =
            (struct item_info *)malloc(sizeof(struct item_info));
        if (!new_curs->internal) {
                free(new_curs);
                return NULL;
        }
        new_curs->get = cursor_get;
        new_curs->delete = cursor_delete;

        new_curs->bucket = 0;
        new_curs->pgno = INVALID_PGNO;
        new_curs->ndx = 0;
        new_curs->pgndx = 0;
        new_curs->pagep = NULL;

        /* place onto queue of cursors */
        hashp = (HTAB *)dbp->internal;
        TAILQ_INSERT_TAIL(&hashp->curs_queue, new_curs, queue);

        return new_curs;
}

static int32_t
cursor_get(const DB *dbp, CURSOR *cursorp, DBT *key, DBT *val, u_int32_t flags)
{
        HTAB *hashp;
        ITEM_INFO item_info;

        hashp = (HTAB *)dbp->internal;

        if (flags && flags != R_FIRST && flags != R_NEXT) {
                hashp->local_errno = errno = EINVAL;
                return (ERROR);
        }
#ifdef HASH_STATISTICS
        hash_accesses++;
#endif

        item_info.seek_size = 0;

        if (flags == R_FIRST)
                __get_item_first(hashp, cursorp, key, val, &item_info);
        else
                __get_item_next(hashp, cursorp, key, val, &item_info);

        /*
         * This needs to be changed around.  As is, get_item_next advances
         * the pointers on the page but this function actually advances
         * bucket pointers.  This works, since the only other place we
         * use get_item_next is in hash_access which only deals with one
         * bucket at a time.  However, there is the problem that certain other
         * functions (such as find_bigpair and delpair) depend on the
         * pgndx member of the cursor.  Right now, they are using pgndx - 1
         * since indices refer to the __next__ item that is to be fetched
         * from the page.  This is ugly, as you may have noticed, whoever
         * you are.  The best solution would be to depend on item_infos to
         * deal with _current_ information, and have the cursors only
         * deal with _next_ information.  In that scheme, get_item_next
         * would also advance buckets.  Version 3...
         */


        /*
         * Must always enter this loop to do error handling and
         * check for big key/data pair.
         */
        while (1) {
                if (item_info.status == ITEM_OK) {
                        if (item_info.key_off == BIGPAIR &&
                            __big_keydata(hashp, cursorp->pagep, key, val,
                            item_info.pgndx))
                                return (ABNORMAL);

                        break;
                } else if (item_info.status != ITEM_NO_MORE)
                        return (ABNORMAL);

                __put_page(hashp, cursorp->pagep, A_RAW, 0);
                cursorp->ndx = cursorp->pgndx = 0;
                cursorp->bucket++;
                cursorp->pgno = INVALID_PGNO;
                cursorp->pagep = NULL;
                if (cursorp->bucket > hashp->hdr.max_bucket)
                        return (ABNORMAL);
                __get_item_next(hashp, cursorp, key, val, &item_info);
        }

        __get_item_done(hashp, cursorp);
        return (0);
}

static int32_t
cursor_delete(const DB *dbp, CURSOR *cursor, u_int32_t flags)
{
        /* XXX this is empirically determined, so it might not be completely
           correct, but it seems to work.  At the very least it fixes
           a memory leak */

        free(cursor->internal);
        free(cursor);

        return (0);
}

static int32_t
hash_seq(const DB *dbp, DBT *key, DBT *val, u_int32_t flag)
{
        HTAB *hashp;

        /*
         * Seq just uses the default cursor to go sequencing through the
         * database.  Note that the default cursor is the first in the list.
         */

        hashp = (HTAB *)dbp->internal;
        if (!hashp->seq_cursor)
                hashp->seq_cursor = __cursor_creat(dbp);

        return (hashp->seq_cursor->get(dbp, hashp->seq_cursor, key, val, flag));
}

/********************************* UTILITIES ************************/
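/*
 * Table expansion follows the usual linear-hashing scheme: each call to
 * __expand_table() adds one bucket (the new max_bucket) and splits the
 * records of the bucket sharing its low-order bits (max_bucket & low_mask)
 * between the old and new buckets.  When max_bucket crosses a power of
 * two, the split point advances and low_mask/high_mask double.
 */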
/*
 * Returns:
 *       0 ==> OK
 *      -1 ==> Error
 */
int32_t
__expand_table(HTAB *hashp)
{
        u_int32_t old_bucket, new_bucket;
        int32_t spare_ndx;

#ifdef HASH_STATISTICS
        hash_expansions++;
#endif
        new_bucket = ++hashp->hdr.max_bucket;
        old_bucket = (hashp->hdr.max_bucket & hashp->hdr.low_mask);

        /* Get a page for this new bucket */
        if (__new_page(hashp, new_bucket, A_BUCKET) != 0)
                return (-1);

        /*
         * If the split point is increasing (hdr.max_bucket's log base 2
         * increases), we need to copy the current contents of the spare
         * split bucket to the next bucket.
         */
        spare_ndx = __log2(hashp->hdr.max_bucket + 1);
        if (spare_ndx > hashp->hdr.ovfl_point) {
                hashp->hdr.spares[spare_ndx] = hashp->hdr.spares[hashp->hdr.ovfl_point];
                hashp->hdr.ovfl_point = spare_ndx;
        }
        if (new_bucket > hashp->hdr.high_mask) {
                /* Starting a new doubling */
                hashp->hdr.low_mask = hashp->hdr.high_mask;
                hashp->hdr.high_mask = new_bucket | hashp->hdr.low_mask;
        }
        if (BUCKET_TO_PAGE(new_bucket) > MAX_PAGES(hashp)) {
                fprintf(stderr, "hash: Cannot allocate new bucket.  Pages exhausted.\n");
                return (-1);
        }
        /* Relocate records to the new bucket */
        return (__split_page(hashp, old_bucket, new_bucket));
}

u_int32_t
__call_hash(HTAB *hashp, int8_t *k, int32_t len)
{
        u_int32_t n, bucket;

        n = hashp->hash(k, len);
        bucket = n & hashp->hdr.high_mask;
        if (bucket > hashp->hdr.max_bucket)
                bucket = bucket & hashp->hdr.low_mask;
        return (bucket);
}
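/*
 * Two flavors of header byte-swapping are needed: swap_header_copy()
 * swaps into a separate copy (used on the write path so the in-memory
 * header stays in host order), while swap_header() swaps hashp->hdr in
 * place (used just after the header has been read from disk).
 */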
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
/*
 * Hashp->hdr needs to be byteswapped.
 */
static void
swap_header_copy(HASHHDR *srcp, HASHHDR *destp)
{
        int32_t i;

        P_32_COPY(srcp->magic, destp->magic);
        P_32_COPY(srcp->version, destp->version);
        P_32_COPY(srcp->lorder, destp->lorder);
        P_32_COPY(srcp->bsize, destp->bsize);
        P_32_COPY(srcp->bshift, destp->bshift);
        P_32_COPY(srcp->ovfl_point, destp->ovfl_point);
        P_32_COPY(srcp->last_freed, destp->last_freed);
        P_32_COPY(srcp->max_bucket, destp->max_bucket);
        P_32_COPY(srcp->high_mask, destp->high_mask);
        P_32_COPY(srcp->low_mask, destp->low_mask);
        P_32_COPY(srcp->ffactor, destp->ffactor);
        P_32_COPY(srcp->nkeys, destp->nkeys);
        P_32_COPY(srcp->hdrpages, destp->hdrpages);
        P_32_COPY(srcp->h_charkey, destp->h_charkey);
        for (i = 0; i < NCACHED; i++) {
                P_32_COPY(srcp->spares[i], destp->spares[i]);
                P_16_COPY(srcp->bitmaps[i], destp->bitmaps[i]);
        }
}

static void
swap_header(HTAB *hashp)
{
        HASHHDR *hdrp;
        int32_t i;

        hdrp = &hashp->hdr;

        M_32_SWAP(hdrp->magic);
        M_32_SWAP(hdrp->version);
        M_32_SWAP(hdrp->lorder);
        M_32_SWAP(hdrp->bsize);
        M_32_SWAP(hdrp->bshift);
        M_32_SWAP(hdrp->ovfl_point);
        M_32_SWAP(hdrp->last_freed);
        M_32_SWAP(hdrp->max_bucket);
        M_32_SWAP(hdrp->high_mask);
        M_32_SWAP(hdrp->low_mask);
        M_32_SWAP(hdrp->ffactor);
        M_32_SWAP(hdrp->nkeys);
        M_32_SWAP(hdrp->hdrpages);
        M_32_SWAP(hdrp->h_charkey);
        for (i = 0; i < NCACHED; i++) {
                M_32_SWAP(hdrp->spares[i]);
                M_16_SWAP(hdrp->bitmaps[i]);
        }
}
#endif /* DB_BYTE_ORDER == DB_LITTLE_ENDIAN */