author     Ulrich Drepper <drepper@redhat.com>   1997-08-27 20:26:10 +0000
committer  Ulrich Drepper <drepper@redhat.com>   1997-08-27 20:26:10 +0000
commit     92f1da4da04a7a86ddee91be5eaf0b10c333ac64 (patch)
tree       2a10ce9e4e407e7e5b5ca092ca0947d234b5ff60 /db2/hash/hash_dup.c
parent     22be878ecbc66606371bd33258f56e6711e6ba7a (diff)
download   glibc-92f1da4da04a7a86ddee91be5eaf0b10c333ac64.tar.gz
           glibc-92f1da4da04a7a86ddee91be5eaf0b10c333ac64.tar.xz
           glibc-92f1da4da04a7a86ddee91be5eaf0b10c333ac64.zip
Update. cvs/libc-ud-970827
1997-08-10 19:17 Philip Blundell <Philip.Blundell@pobox.com>

* nss/nss_db/db-XXX.c: Include <db_185.h> not <db.h>. Somebody should update this to use the new db API.
* nss/nss_db/db-netgrp.c: Likewise.
* nss/nss_db/db-alias.c: Likewise.
* db2/Makefile: Makefile for db-2.x in glibc.

1997-08-27 21:20 Ulrich Drepper <drepper@cygnus.com>

* csu/Makefile (before-compile): New goal. Make sure abi-tag.h is generated.
[$(elf)=yes] (asm-CPPFLAGS): Make sure abi-tag.h file can be found.
* Makeconfig [$(build-omitfp)=yes] (CFLAGS-.o): Add -D__USE_STRING_INLINES.
* string/string.h: Move strnlen optimization after inclusion of <bits/string.h>. Include <bits/string.h> only if __USE_STRING_INLINES is defined.
* sysdeps/generic/memcpy.c: Undef memcpy to allow macro of this name in <bits/string.h>.
* sysdeps/generic/memset.c: Likewise.
* sysdeps/i386/string.h: i386 optimized string functions.
* sysdeps/i386/i486string.h: i486+ optimized string functions.
* Makefile (subdirs): Change db to db2.
* shlib-versions: Bump libdb version number to 3.
* include/db.h: Include from db2 directory.
* include/db_185.h: New file.
* sysdeps/i386/Makefile [$(subdirs)=db2] (CPPFLAGS): Add macros to provide spinlock information for db2.
* sysdeps/m68k/m68020/Makefile: New file. Likewise.
* sysdeps/sparc/Makefile: New file. Likewise.
* sysdeps/unix/sysv/linux/Makefile [$(subdirs)=db2] (CPPFLAGS): Add -DHAVE_LLSEEK.
* db2/config.h: Hand-edited config file for db2 in glibc.
* db2/compat.h: New file from db-2.3.4.
* db2/db.h: Likewise.
* db2/db_185.h: Likewise.
* db2/db_int.h: Likewise.
* db2/makedb.c: Likewise.
* db2/btree/bt_close.c: Likewise.
* db2/btree/bt_compare.c: Likewise.
* db2/btree/bt_conv.c: Likewise.
* db2/btree/bt_cursor.c: Likewise.
* db2/btree/bt_delete.c: Likewise.
* db2/btree/bt_open.c: Likewise.
* db2/btree/bt_page.c: Likewise.
* db2/btree/bt_put.c: Likewise.
* db2/btree/bt_rec.c: Likewise.
* db2/btree/bt_recno.c: Likewise.
* db2/btree/btree_auto.c: Likewise.
* db2/btree/bt_rsearch.c: Likewise.
* db2/btree/bt_search.c: Likewise.
* db2/btree/bt_split.c: Likewise.
* db2/btree/bt_stat.c: Likewise.
* db2/btree/btree.src: Likewise.
* db2/common/db_appinit.c: Likewise.
* db2/common/db_err.c: Likewise.
* db2/common/db_byteorder.c: Likewise.
* db2/common/db_apprec.c: Likewise.
* db2/common/db_salloc.c: Likewise.
* db2/common/db_log2.c: Likewise.
* db2/common/db_region.c: Likewise.
* db2/common/db_shash.c: Likewise.
* db2/db/db.c: Likewise.
* db2/db/db.src: Likewise.
* db2/db/db_conv.c: Likewise.
* db2/db/db_dispatch.c: Likewise.
* db2/db/db_dup.c: Likewise.
* db2/db/db_overflow.c: Likewise.
* db2/db/db_pr.c: Likewise.
* db2/db/db_rec.c: Likewise.
* db2/db/db_ret.c: Likewise.
* db2/db/db_thread.c: Likewise.
* db2/db/db_auto.c: Likewise.
* db2/db185/db185.c: Likewise.
* db2/db185/db185_int.h: Likewise.
* db2/dbm/dbm.c: Likewise.
* db2/hash/hash.c: Likewise.
* db2/hash/hash.src: Likewise.
* db2/hash/hash_page.c: Likewise.
* db2/hash/hash_conv.c: Likewise.
* db2/hash/hash_debug.c: Likewise.
* db2/hash/hash_stat.c: Likewise.
* db2/hash/hash_rec.c: Likewise.
* db2/hash/hash_dup.c: Likewise.
* db2/hash/hash_func.c: Likewise.
* db2/hash/hash_auto.c: Likewise.
* db2/include/mp.h: Likewise.
* db2/include/btree.h: Likewise.
* db2/include/db.h.src: Likewise.
* db2/include/db_int.h.src: Likewise.
* db2/include/db_shash.h: Likewise.
* db2/include/db_swap.h: Likewise.
* db2/include/db_185.h.src: Likewise.
* db2/include/txn.h: Likewise.
* db2/include/db_am.h: Likewise.
* db2/include/shqueue.h: Likewise.
* db2/include/hash.h: Likewise.
* db2/include/db_dispatch.h: Likewise.
* db2/include/lock.h: Likewise.
* db2/include/db_page.h: Likewise.
* db2/include/log.h: Likewise.
* db2/include/db_auto.h: Likewise.
* db2/include/btree_auto.h: Likewise.
* db2/include/hash_auto.h: Likewise.
* db2/include/log_auto.h: Likewise.
* db2/include/txn_auto.h: Likewise.
* db2/include/db_ext.h: Likewise.
* db2/include/btree_ext.h: Likewise.
* db2/include/clib_ext.h: Likewise.
* db2/include/common_ext.h: Likewise.
* db2/include/hash_ext.h: Likewise.
* db2/include/lock_ext.h: Likewise.
* db2/include/log_ext.h: Likewise.
* db2/include/mp_ext.h: Likewise.
* db2/include/mutex_ext.h: Likewise.
* db2/include/os_ext.h: Likewise.
* db2/include/txn_ext.h: Likewise.
* db2/include/cxx_int.h: Likewise.
* db2/include/db_cxx.h: Likewise.
* db2/include/queue.h: Likewise.
* db2/lock/lock.c: Likewise.
* db2/lock/lock_conflict.c: Likewise.
* db2/lock/lock_util.c: Likewise.
* db2/lock/lock_deadlock.c: Likewise.
* db2/log/log.c: Likewise.
* db2/log/log_get.c: Likewise.
* db2/log/log.src: Likewise.
* db2/log/log_compare.c: Likewise.
* db2/log/log_put.c: Likewise.
* db2/log/log_rec.c: Likewise.
* db2/log/log_archive.c: Likewise.
* db2/log/log_register.c: Likewise.
* db2/log/log_auto.c: Likewise.
* db2/log/log_findckp.c: Likewise.
* db2/mp/mp_bh.c: Likewise.
* db2/mp/mp_fget.c: Likewise.
* db2/mp/mp_fopen.c: Likewise.
* db2/mp/mp_fput.c: Likewise.
* db2/mp/mp_fset.c: Likewise.
* db2/mp/mp_open.c: Likewise.
* db2/mp/mp_region.c: Likewise.
* db2/mp/mp_pr.c: Likewise.
* db2/mp/mp_sync.c: Likewise.
* db2/mutex/68020.gcc: Likewise.
* db2/mutex/mutex.c: Likewise.
* db2/mutex/README: Likewise.
* db2/mutex/x86.gcc: Likewise.
* db2/mutex/sparc.gcc: Likewise.
* db2/mutex/uts4.cc.s: Likewise.
* db2/mutex/alpha.dec: Likewise.
* db2/mutex/alpha.gcc: Likewise.
* db2/mutex/parisc.gcc: Likewise.
* db2/mutex/parisc.hp: Likewise.
* db2/os/db_os_abs.c: Likewise.
* db2/os/db_os_dir.c: Likewise.
* db2/os/db_os_fid.c: Likewise.
* db2/os/db_os_lseek.c: Likewise.
* db2/os/db_os_mmap.c: Likewise.
* db2/os/db_os_open.c: Likewise.
* db2/os/db_os_rw.c: Likewise.
* db2/os/db_os_sleep.c: Likewise.
* db2/os/db_os_stat.c: Likewise.
* db2/os/db_os_unlink.c: Likewise.
* db2/txn/txn.c: Likewise.
* db2/txn/txn.src: Likewise.
* db2/txn/txn_rec.c: Likewise.
* db2/txn/txn_auto.c: Likewise.
* db2/clib/getlong.c: Likewise.
* db2/progs/db_archive/db_archive.c: Likewise.
* db2/progs/db_checkpoint/db_checkpoint.c: Likewise.
* db2/progs/db_deadlock/db_deadlock.c: Likewise.
* db2/progs/db_dump/db_dump.c: Likewise.
* db2/progs/db_dump185/db_dump185.c: Likewise.
* db2/progs/db_load/db_load.c: Likewise.
* db2/progs/db_printlog/db_printlog.c: Likewise.
* db2/progs/db_recover/db_recover.c: Likewise.
* db2/progs/db_stat/db_stat.c: Likewise.
* libio/stdio.h [__cplusplus] (__STDIO_INLINE): Define as inline.
* po/de.po, po/sv.po: Update from 2.0.5 translations.
* sysdeps/unix/sysv/linux/netinet/tcp.h: Pretty print.
* sunrpc/rpc/xdr.h (XDR): Don't define argument of x_destroy callback as const.
* sunrpc/xdr_mem.c (xdrmem_destroy): Don't define argument as const.
* sunrpc/xdr_rec.c (xdrrec_destroy): Likewise.
* sunrpc/xdr_stdio.c (xdrstdio_destroy): Likewise.

1997-08-27 18:47 Ulrich Drepper <drepper@cygnus.com>

* sysdeps/unix/sysv/linux/if_index.c: Include <errno.h>.
Reported by Benjamin Kosnik <bkoz@cygnus.com>.

1997-08-27 02:27 Roland McGrath <roland@baalperazim.frob.com>

* abi-tags: New file.
* csu/Makefile (distribute): Remove abi-tag.h.
($(objpfx)abi-tag.h): New target.
* Makefile (distribute): Add abi-tags.
* sysdeps/unix/sysv/linux/abi-tag.h: File removed.
* sysdeps/mach/hurd/abi-tag.h: File removed.
* sysdeps/stub/abi-tag.h: File removed.

1997-08-25 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>

* sysdeps/unix/make-syscalls.sh: Change output so that it generates compilation rules only for the currently selected object suffixes.

1997-08-25 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>

* sysdeps/m68k/dl-machine.h (RTLD_START): Switch back to previous section to avoid confusing the compiler.
* sysdeps/alpha/dl-machine.h (RTLD_START): Likewise.
* sysdeps/i386/dl-machine.h (RTLD_START): Likewise.
* sysdeps/mips/dl-machine.h (RTLD_START): Likewise.
* sysdeps/mips/mips64/dl-machine.h (RTLD_START): Likewise.
* sysdeps/sparc/sparc32/dl-machine.h (RTLD_START): Likewise.
* sysdeps/m68k/dl-machine.h (elf_machine_load_address): Use a GOT relocation instead of a constant to avoid text relocation.
(ELF_MACHINE_BEFORE_RTLD_RELOC): Removed.
(RTLD_START): Declare global labels as functions and add size directive.

1997-08-25 17:01 Ulrich Drepper <drepper@cygnus.com>

* sysdeps/i386/bits/select.h: Correct assembler versions to work even for descriptors >= 32.
* stdlib/alloca.h: Don't define alloca to __alloca since if gcc is used __alloca is not defined to __builtin_alloca and so might not be available.
Reported by Uwe Ohse <uwe@ohse.de>.
* sysdeps/unix/sysv/linux/sys/sysmacros.h: Define macros in a special way if gcc is not used and so dev_t is an array.
Reported by Uwe Ohse <uwe@ohse.de>.

1997-08-23 Andreas Schwab <schwab@issan.informatik.uni-dortmund.de>

* manual/libc.texinfo: Reorder chapters to match logical order.

1997-08-25 12:22 Ulrich Drepper <drepper@cygnus.com>

* sunrpc/rpc/xdr.h: Change name of parameters in prototypes of xdr_reference, xdrmem_create, and xdrstdio_create because of clash with g++ internal symbols.
Patch by Sudish Joseph <sj@eng.mindspring.net>.
* elf/dl-deps.c: Implement handling of DT_FILTER.
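The 1997-08-10 entry keeps the nss_db backends on the old db-1.85 interface by including <db_185.h> rather than the new db-2.x <db.h>. For context only, here is a minimal sketch of that 1.85-style interface in use; it is not code from this commit, and the database path, key, and value strings are invented for illustration.

/* Hypothetical example of the db-1.85 compatibility interface that
   <db_185.h> keeps available; nothing here is taken from the patch.  */
#include <db_185.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  DB *db;
  DBT key, val;

  /* dbopen() is the 1.85-style entry point; db-2.x callers would use
     db_open() and the handle's method table instead.  */
  db = dbopen ("/tmp/example.db", O_CREAT | O_RDWR, 0644, DB_HASH, NULL);
  if (db == NULL)
    return 1;

  key.data = (void *) "alias";
  key.size = sizeof ("alias");
  val.data = (void *) "value";
  val.size = sizeof ("value");

  if (db->put (db, &key, &val, 0) != 0)   /* store one record */
    perror ("put");
  if (db->get (db, &key, &val, 0) == 0)   /* read it back */
    printf ("%s\n", (char *) val.data);

  db->close (db);
  return 0;
}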
Diffstat (limited to 'db2/hash/hash_dup.c')
-rw-r--r--   db2/hash/hash_dup.c | 544
1 file changed, 544 insertions, 0 deletions
diff --git a/db2/hash/hash_dup.c b/db2/hash/hash_dup.c new file mode 100644 index 0000000000..059eec6f92 --- /dev/null +++ b/db2/hash/hash_dup.c @@ -0,0 +1,544 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996, 1997 + * Sleepycat Software. All rights reserved. + */ +/* + * Copyright (c) 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Margo Seltzer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +#include "config.h" + +#ifndef lint +static const char sccsid[] = "@(#)hash_dup.c 10.5 (Sleepycat) 7/27/97"; +#endif /* not lint */ + +/* + * PACKAGE: hashing + * + * DESCRIPTION: + * Manipulation of duplicates for the hash package. + * + * ROUTINES: + * + * External + * __add_dup + * Internal + */ + +#ifndef NO_SYSTEM_INCLUDES +#include <sys/types.h> + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#endif + +#include "db_int.h" +#include "db_page.h" +#include "db_swap.h" +#include "hash.h" + +static int __ham_check_move __P((HTAB *, HASH_CURSOR *, int32_t)); +static int __ham_dup_convert __P((HTAB *, HASH_CURSOR *)); +static int __ham_make_dup __P((const DBT *, DBT *d, void **, u_int32_t *)); + +/* + * Called from hash_access to add a duplicate key. nval is the new + * value that we want to add. The flags correspond to the flag values + * to cursor_put indicating where to add the new element. + * There are 4 cases. + * Case 1: The existing duplicate set already resides on a separate page. + * We can use common code for this. + * Case 2: The element is small enough to just be added to the existing set. + * Case 3: The element is large enough to be a big item, so we're going to + * have to push the set onto a new page. 
+ * Case 4: The element is large enough to push the duplicate set onto a + * separate page. + * + * PUBLIC: int __ham_add_dup __P((HTAB *, HASH_CURSOR *, DBT *, int)); + */ +int +__ham_add_dup(hashp, hcp, nval, flags) + HTAB *hashp; + HASH_CURSOR *hcp; + DBT *nval; + int flags; +{ + DBT pval, tmp_val; + HKEYDATA *hk; + u_int32_t del_len, new_size; + int ret; + + if (flags == DB_CURRENT && hcp->dpgno == PGNO_INVALID) + del_len = hcp->dup_len; + else + del_len = 0; + + if ((ret = __ham_check_move(hashp, hcp, + (int32_t)DUP_SIZE(nval->size) - (int32_t)del_len)) != 0) + return (ret); + + /* + * Check if resulting duplicate set is going to need to go + * onto a separate duplicate page. If so, convert the + * duplicate set and add the new one. After conversion, + * hcp->dndx is the first free ndx or the index of the + * current pointer into the duplicate set. + */ + hk = H_PAIRDATA(hcp->pagep, hcp->bndx); + new_size = DUP_SIZE(nval->size) - del_len + LEN_HKEYDATA(hcp->pagep, + hashp->hdr->pagesize, H_DATAINDEX(hcp->bndx)); + + /* + * We convert to off-page duplicates if the item is a big item, + * the addition of the new item will make the set large, or + * if there isn't enough room on this page to add the next item. + */ + if (hk->type != H_OFFDUP && + (hk->type == H_OFFPAGE || ISBIG(hashp, new_size) || + DUP_SIZE(nval->size) - del_len > P_FREESPACE(hcp->pagep))) { + + if ((ret = __ham_dup_convert(hashp, hcp)) != 0) + return (ret); + else + hk = H_PAIRDATA(hcp->pagep, hcp->bndx); + } + + /* There are two separate cases here: on page and off page. */ + if (hk->type != H_OFFDUP) { + if (hk->type != H_DUPLICATE) { + hk->type = H_DUPLICATE; + pval.flags = 0; + pval.data = hk->data; + pval.size = LEN_HDATA(hcp->pagep, hashp->hdr->pagesize, + hcp->bndx); + if ((ret = __ham_make_dup(&pval, &tmp_val, &hcp->big_data, + &hcp->big_datalen)) != 0 || + (ret = __ham_replpair(hashp, hcp, &tmp_val, 1)) != 0) + return (ret); + } + + /* Now make the new entry a duplicate. */ + if ((ret = __ham_make_dup(nval, + &tmp_val, &hcp->big_data, &hcp->big_datalen)) != 0) + return (ret); + + tmp_val.dlen = 0; + switch (flags) { /* On page. */ + case DB_KEYFIRST: + tmp_val.doff = 0; + break; + case DB_KEYLAST: + tmp_val.doff = LEN_HDATA(hcp->pagep, + hashp->hdr->pagesize, hcp->bndx); + break; + case DB_CURRENT: + tmp_val.doff = hcp->dup_off; + tmp_val.dlen = DUP_SIZE(hcp->dup_len); + break; + case DB_BEFORE: + tmp_val.doff = hcp->dup_off; + break; + case DB_AFTER: + tmp_val.doff = hcp->dup_off + DUP_SIZE(hcp->dup_len); + break; + } + /* Add the duplicate. */ + ret = __ham_replpair(hashp, hcp, &tmp_val, 0); + if (ret == 0) + ret = __ham_dirty_page(hashp, hcp->pagep); + __ham_c_update(hashp, hcp, hcp->pgno, tmp_val.size, 1, 1); + return (ret); + } + + /* If we get here, then we're on duplicate pages. */ + if (hcp->dpgno == PGNO_INVALID) { + memcpy(&hcp->dpgno, + (u_int8_t *)hk + SSZ(HOFFDUP, pgno), sizeof(db_pgno_t)); + hcp->dndx = 0; + } + + switch (flags) { + case DB_KEYFIRST: + /* + * The only way that we are already on a dup page is + * if we just converted the on-page representation. + * In that case, we've only got one page of duplicates. 
+ */ + if (hcp->dpagep == NULL && (ret = + __db_dend(hashp->dbp, hcp->dpgno, &hcp->dpagep)) != 0) + return (ret); + hcp->dndx = 0; + break; + case DB_KEYLAST: + if (hcp->dpagep == NULL && (ret = + __db_dend(hashp->dbp, hcp->dpgno, &hcp->dpagep)) != 0) + return (ret); + hcp->dpgno = PGNO(hcp->dpagep); + hcp->dndx = NUM_ENT(hcp->dpagep); + break; + case DB_CURRENT: + if ((ret = __db_ditem(hashp->dbp, hcp->dpagep, hcp->dndx, + BKEYDATA_SIZE(GET_BKEYDATA(hcp->dpagep, hcp->dndx)->len))) + != 0) + return (ret); + break; + case DB_BEFORE: /* The default behavior is correct. */ + break; + case DB_AFTER: + hcp->dndx++; + break; + } + + ret = __db_dput(hashp->dbp, + nval, &hcp->dpagep, &hcp->dndx, __ham_overflow_page); + hcp->pgno = PGNO(hcp->pagep); + __ham_c_update(hashp, hcp, hcp->pgno, nval->size, 1, 1); + return (ret); +} + +/* + * Convert an on-page set of duplicates to an offpage set of duplicates. + */ +static int +__ham_dup_convert(hashp, hcp) + HTAB *hashp; + HASH_CURSOR *hcp; +{ + BOVERFLOW bo; + DBT dbt; + HOFFPAGE ho; + db_indx_t dndx, len; + int ret; + u_int8_t *p, *pend; + + /* + * Create a new page for the duplicates. + */ + if ((ret = + __ham_overflow_page(hashp->dbp, P_DUPLICATE, &hcp->dpagep)) != 0) + return (ret); + hcp->dpagep->type = P_DUPLICATE; + hcp->dpgno = PGNO(hcp->dpagep); + + /* + * Now put the duplicates onto the new page. + */ + dbt.flags = 0; + switch (((HKEYDATA *)H_PAIRDATA(hcp->pagep, hcp->bndx))->type) { + case H_KEYDATA: + /* Simple case, one key on page; move it to dup page. */ + dndx = 0; + dbt.size = + LEN_HDATA(hcp->pagep, hashp->hdr->pagesize, hcp->bndx); + dbt.data = + ((HKEYDATA *)H_PAIRDATA(hcp->pagep, hcp->bndx))->data; + ret = __db_pitem(hashp->dbp, hcp->dpagep, + (u_int32_t)dndx, BKEYDATA_SIZE(dbt.size), NULL, &dbt); + if (ret == 0) + __ham_dirty_page(hashp, hcp->dpagep); + break; + case H_OFFPAGE: + /* Simple case, one key on page; move it to dup page. */ + dndx = 0; + memcpy(&ho, + P_ENTRY(hcp->pagep, H_DATAINDEX(hcp->bndx)), HOFFPAGE_SIZE); + bo.deleted = 0; + bo.type = ho.type; + bo.pgno = ho.pgno; + bo.tlen = ho.tlen; + dbt.size = BOVERFLOW_SIZE; + dbt.data = &bo; + + ret = __db_pitem(hashp->dbp, hcp->dpagep, + (u_int32_t)dndx, dbt.size, &dbt, NULL); + if (ret == 0) + __ham_dirty_page(hashp, hcp->dpagep); + break; + case H_DUPLICATE: + p = ((HKEYDATA *)H_PAIRDATA(hcp->pagep, hcp->bndx))->data; + pend = p + + LEN_HDATA(hcp->pagep, hashp->hdr->pagesize, hcp->bndx); + + for (dndx = 0; p < pend; dndx++) { + memcpy(&len, p, sizeof(db_indx_t)); + dbt.size = len; + p += sizeof(db_indx_t); + dbt.data = p; + p += len + sizeof(db_indx_t); + ret = __db_dput(hashp->dbp, &dbt, + &hcp->dpagep, &dndx, __ham_overflow_page); + if (ret != 0) + break; + } + break; + default: + ret = __db_pgfmt(hashp->dbp, (u_long)hcp->pgno); + } + if (ret == 0) { + /* + * Now attach this to the source page in place of + * the old duplicate item. + */ + __ham_move_offpage(hashp, hcp->pagep, + (u_int32_t)H_DATAINDEX(hcp->bndx), hcp->dpgno); + + /* Can probably just do a "put" here. 
*/ + ret = __ham_dirty_page(hashp, hcp->pagep); + } else { + (void)__ham_del_page(hashp->dbp, hcp->dpagep); + hcp->dpagep = NULL; + } + return (ret); +} + +static int +__ham_make_dup(notdup, dup, bufp, sizep) + const DBT *notdup; + DBT *dup; + void **bufp; + u_int32_t *sizep; +{ + db_indx_t tsize, item_size; + int ret; + u_int8_t *p; + + item_size = (db_indx_t)notdup->size; + tsize = DUP_SIZE(item_size); + if ((ret = __ham_init_dbt(dup, tsize, bufp, sizep)) != 0) + return (ret); + + dup->dlen = 0; + dup->flags = notdup->flags; + F_SET(dup, DB_DBT_PARTIAL); + + p = dup->data; + memcpy(p, &item_size, sizeof(db_indx_t)); + p += sizeof(db_indx_t); + memcpy(p, notdup->data, notdup->size); + p += notdup->size; + memcpy(p, &item_size, sizeof(db_indx_t)); + + dup->doff = 0; + dup->dlen = notdup->size; + + return (0); +} + +static int +__ham_check_move(hashp, hcp, add_len) + HTAB *hashp; + HASH_CURSOR *hcp; + int32_t add_len; +{ + DBT k, d; + DB_LSN new_lsn; + HKEYDATA *hk; + PAGE *next_pagep; + db_pgno_t next_pgno; + int rectype, ret; + u_int32_t new_datalen, old_len; + + /* + * Check if we can do whatever we need to on this page. If not, + * then we'll have to move the current element to a new page. + */ + + hk = H_PAIRDATA(hcp->pagep, hcp->bndx); + + /* + * If the item is already off page duplicates or an offpage item, + * then we know we can do whatever we need to do in-place + */ + if (hk->type == H_OFFDUP || hk->type == H_OFFPAGE) + return (0); + + old_len = + LEN_HITEM(hcp->pagep, hashp->hdr->pagesize, H_DATAINDEX(hcp->bndx)); + new_datalen = old_len - HKEYDATA_SIZE(0) + add_len; + + /* + * We need to add a new page under two conditions: + * 1. The addition makes the total data length cross the BIG + * threshold and the OFFDUP structure won't fit on this page. + * 2. The addition does not make the total data cross the + * threshold, but the new data won't fit on the page. + * If neither of these is true, then we can return. + */ + if (ISBIG(hashp, new_datalen) && (old_len > HOFFDUP_SIZE || + HOFFDUP_SIZE - old_len <= P_FREESPACE(hcp->pagep))) + return (0); + + if (!ISBIG(hashp, new_datalen) && + add_len <= (int32_t)P_FREESPACE(hcp->pagep)) + return (0); + + /* + * If we get here, then we need to move the item to a new page. + * Check if there are more pages in the chain. + */ + + new_datalen = ISBIG(hashp, new_datalen) ? + HOFFDUP_SIZE : HKEYDATA_SIZE(new_datalen); + + next_pagep = NULL; + for (next_pgno = NEXT_PGNO(hcp->pagep); next_pgno != PGNO_INVALID; + next_pgno = NEXT_PGNO(next_pagep)) { + if (next_pagep != NULL && + (ret = __ham_put_page(hashp->dbp, next_pagep, 0)) != 0) + return (ret); + + if ((ret = __ham_get_page(hashp->dbp, next_pgno, &next_pagep)) != 0) + return (ret); + + if (P_FREESPACE(next_pagep) >= new_datalen) + break; + } + + /* No more pages, add one. */ + if (next_pagep == NULL && + (ret = __ham_add_ovflpage(hashp, hcp->pagep, 0, &next_pagep)) != 0) + return (ret); + + /* Add new page at the end of the chain. */ + if (P_FREESPACE(next_pagep) < new_datalen && + (ret = __ham_add_ovflpage(hashp, next_pagep, 1, &next_pagep)) != 0) + return (ret); + + /* Copy the item to the new page. 
*/ + if (DB_LOGGING(hashp->dbp)) { + rectype = PUTPAIR; + k.flags = 0; + d.flags = 0; + if (H_PAIRKEY(hcp->pagep, hcp->bndx)->type == H_OFFPAGE) { + rectype |= PAIR_KEYMASK; + k.data = H_PAIRKEY(hcp->pagep, hcp->bndx); + k.size = HOFFPAGE_SIZE; + } else { + k.data = H_PAIRKEY(hcp->pagep, hcp->bndx)->data; + k.size = LEN_HKEY(hcp->pagep, + hashp->hdr->pagesize, hcp->bndx); + } + + if (hk->type == H_OFFPAGE) { + rectype |= PAIR_DATAMASK; + d.data = H_PAIRDATA(hcp->pagep, hcp->bndx); + d.size = HOFFPAGE_SIZE; + } else { + d.data = H_PAIRDATA(hcp->pagep, hcp->bndx)->data; + d.size = LEN_HDATA(hcp->pagep, + hashp->hdr->pagesize, hcp->bndx); + } + + + if ((ret = __ham_insdel_log(hashp->dbp->dbenv->lg_info, + (DB_TXN *)hashp->dbp->txn, &new_lsn, 0, rectype, + hashp->dbp->log_fileid, PGNO(next_pagep), + (u_int32_t)H_NUMPAIRS(next_pagep), &LSN(next_pagep), + &k, &d)) != 0) + return (ret); + + /* Move lsn onto page. */ + LSN(next_pagep) = new_lsn; /* Structure assignment. */ + } + + __ham_copy_item(hashp, hcp->pagep, H_KEYINDEX(hcp->bndx), next_pagep); + __ham_copy_item(hashp, hcp->pagep, H_DATAINDEX(hcp->bndx), next_pagep); + + /* Now delete the pair from the current page. */ + ret = __ham_del_pair(hashp, hcp); + + (void)__ham_put_page(hashp->dbp, hcp->pagep, 1); + hcp->pagep = next_pagep; + hcp->pgno = PGNO(hcp->pagep); + hcp->bndx = H_NUMPAIRS(hcp->pagep) - 1; + F_SET(hcp, H_EXPAND); + return (ret); +} + +/* + * Replace an onpage set of duplicates with the OFFDUP structure that + * references the duplicate page. + * XXX This is really just a special case of __onpage_replace; we should + * probably combine them. + * PUBLIC: void __ham_move_offpage __P((HTAB *, PAGE *, u_int32_t, db_pgno_t)); + */ +void +__ham_move_offpage(hashp, pagep, ndx, pgno) + HTAB *hashp; + PAGE *pagep; + u_int32_t ndx; + db_pgno_t pgno; +{ + DBT new_dbt; + DBT old_dbt; + HOFFDUP od; + db_indx_t i; + int32_t shrink; + u_int8_t *src; + + od.type = H_OFFDUP; + od.pgno = pgno; + + if (DB_LOGGING(hashp->dbp)) { + new_dbt.data = &od; + new_dbt.size = HOFFDUP_SIZE; + old_dbt.data = P_ENTRY(pagep, ndx); + old_dbt.size = LEN_HITEM(pagep, hashp->hdr->pagesize, ndx); + (void)__ham_replace_log(hashp->dbp->dbenv->lg_info, + (DB_TXN *)hashp->dbp->txn, &LSN(pagep), 0, + hashp->dbp->log_fileid, PGNO(pagep), (u_int32_t)ndx, + &LSN(pagep), -1, &old_dbt, &new_dbt, 0); + } + + shrink = + LEN_HITEM(pagep, hashp->hdr->pagesize, ndx) - HOFFDUP_SIZE; + + if (shrink != 0) { + /* Copy data. */ + src = (u_int8_t *)(pagep) + HOFFSET(pagep); + memmove(src + shrink, src, pagep->inp[ndx] - HOFFSET(pagep)); + HOFFSET(pagep) += shrink; + + /* Update index table. */ + for (i = ndx; i < NUM_ENT(pagep); i++) + pagep->inp[i] += shrink; + } + + /* Now copy the offdup entry onto the page. */ + memcpy(P_ENTRY(pagep, ndx), &od, HOFFDUP_SIZE); +} |
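One detail worth calling out for readers of the patch above: __ham_make_dup stores each element of an on-page duplicate set as a length word, the data bytes, and the length word repeated, which is why DUP_SIZE() adds two index-sized words to the data size and why __ham_dup_convert can walk the set by skipping len + sizeof(db_indx_t) after each element. The fragment below mirrors that memcpy sequence as a stand-alone sketch; the helper name pack_dup and the fixed-width stand-in for db_indx_t are illustrative, not part of the patch.

/* Sketch of the on-page duplicate element layout built by __ham_make_dup:
   [len][data bytes][len].  db_indx_t is assumed to be a 16-bit index type
   here; the real typedef lives in the db2 headers.  */
#include <stdint.h>
#include <string.h>

typedef uint16_t db_indx_t;

/* Bytes one element occupies inside the duplicate set, as in DUP_SIZE().  */
#define DUP_SIZE(len) ((len) + 2 * sizeof (db_indx_t))

/* Append one element at P and return the next write position.  */
static uint8_t *
pack_dup (uint8_t *p, const void *data, db_indx_t len)
{
  memcpy (p, &len, sizeof (db_indx_t));   /* leading length word */
  p += sizeof (db_indx_t);
  memcpy (p, data, len);                  /* the element data */
  p += len;
  memcpy (p, &len, sizeof (db_indx_t));   /* trailing length word */
  return p + sizeof (db_indx_t);
}

Walking the set forward then just repeats the inverse steps, as the H_DUPLICATE case in __ham_dup_convert does: read the leading length, take that many data bytes, and skip the trailing length word.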