Diffstat (limited to 'db2/lock/lock.c')
-rw-r--r-- | db2/lock/lock.c | 666
1 file changed, 28 insertions, 638 deletions
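Most of the 638 deletions below are the shared-memory region setup and debugging code, including the conflict matrix that __lock_create copied to the head of the region and that __lock_dump_region printed with held*nmodes+requested indexing. As a refresher on how such a matrix answers "does a held mode block a requested mode?", here is a small standalone C sketch; the mode names and matrix values are illustrative stand-ins, not db2's db_rw_conflicts table.

#include <stdio.h>

/* Three stand-in modes; db2's default table (DB_LOCK_RW_N modes) is larger. */
enum { MODE_NG, MODE_READ, MODE_WRITE, NMODES };

/*
 * conflicts[held * NMODES + requested] != 0 means a lock held in mode
 * `held' blocks a new request for mode `requested' -- the same flat
 * held*nmodes+requested indexing used by the dump code in this diff.
 * The values below are illustrative only.
 */
static const unsigned char conflicts[NMODES * NMODES] = {
	/* held\req   NG  READ  WRITE */
	/* NG     */   0,   0,    0,
	/* READ   */   0,   0,    1,
	/* WRITE  */   0,   1,    1,
};

int
main(void)
{
	printf("read blocks read?   %d\n",
	    conflicts[MODE_READ * NMODES + MODE_READ]);
	printf("read blocks write?  %d\n",
	    conflicts[MODE_READ * NMODES + MODE_WRITE]);
	printf("write blocks read?  %d\n",
	    conflicts[MODE_WRITE * NMODES + MODE_READ]);
	return (0);
}

The flattened one-dimensional layout is what lets a region store the matrix as a plain byte array of nmodes*nmodes bytes directly after the region header, as the removed initialization code below does.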
diff --git a/db2/lock/lock.c b/db2/lock/lock.c index 0846d3c29f..3d20e0d65b 100644 --- a/db2/lock/lock.c +++ b/db2/lock/lock.c @@ -1,28 +1,21 @@ /*- * See the file LICENSE for redistribution information. * - * Copyright (c) 1996, 1997 + * Copyright (c) 1996, 1997, 1998 * Sleepycat Software. All rights reserved. */ #include "config.h" #ifndef lint -static const char sccsid[] = "@(#)lock.c 10.43 (Sleepycat) 1/8/98"; +static const char sccsid[] = "@(#)lock.c 10.52 (Sleepycat) 5/10/98"; #endif /* not lint */ #ifndef NO_SYSTEM_INCLUDES #include <sys/types.h> -#include <sys/mman.h> -#include <sys/stat.h> #include <errno.h> -#include <fcntl.h> -#include <stddef.h> -#include <stdio.h> -#include <stdlib.h> #include <string.h> -#include <unistd.h> #endif #include "db_int.h" @@ -34,248 +27,15 @@ static const char sccsid[] = "@(#)lock.c 10.43 (Sleepycat) 1/8/98"; #include "db_am.h" static void __lock_checklocker __P((DB_LOCKTAB *, struct __db_lock *, int)); -static int __lock_count_locks __P((DB_LOCKREGION *)); -static int __lock_count_objs __P((DB_LOCKREGION *)); -static int __lock_create __P((const char *, int, DB_ENV *)); static void __lock_freeobj __P((DB_LOCKTAB *, DB_LOCKOBJ *)); -static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, int, const DBT *, - db_lockmode_t, struct __db_lock **)); -static int __lock_grow_region __P((DB_LOCKTAB *, int, size_t)); +static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, u_int32_t, + const DBT *, db_lockmode_t, struct __db_lock **)); static int __lock_put_internal __P((DB_LOCKTAB *, struct __db_lock *, int)); static void __lock_remove_waiter __P((DB_LOCKTAB *, DB_LOCKOBJ *, struct __db_lock *, db_status_t)); -static void __lock_reset_region __P((DB_LOCKTAB *)); -static int __lock_validate_region __P((DB_LOCKTAB *)); -#ifdef DEBUG -static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *)); -static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *)); -static void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int)); -#endif - -/* - * Create and initialize a lock region in shared memory. - */ - -/* - * __lock_create -- - * Create the lock region. Returns an errno. In most cases, - * the errno should be that returned by __db_ropen, in which case - * an EAGAIN means that we should retry, and an EEXIST means that - * the region exists and we didn't need to create it. Any other - * sort of errno should be treated as a system error, leading to a - * failure of the original interface call. - */ -static int -__lock_create(path, mode, dbenv) - const char *path; - int mode; - DB_ENV *dbenv; -{ - struct __db_lock *lp; - struct lock_header *tq_head; - struct obj_header *obj_head; - DB_LOCKOBJ *op; - DB_LOCKREGION *lrp; - u_int maxlocks; - u_int32_t i; - int fd, lock_modes, nelements, ret; - const u_int8_t *conflicts; - u_int8_t *curaddr; - - maxlocks = dbenv == NULL || dbenv->lk_max == 0 ? - DB_LOCK_DEFAULT_N : dbenv->lk_max; - lock_modes = dbenv == NULL || dbenv->lk_modes == 0 ? - DB_LOCK_RW_N : dbenv->lk_modes; - conflicts = dbenv == NULL || dbenv->lk_conflicts == NULL ? - db_rw_conflicts : dbenv->lk_conflicts; - - if ((ret = - __db_rcreate(dbenv, DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, mode, - LOCK_REGION_SIZE(lock_modes, maxlocks, __db_tablesize(maxlocks)), - 0, &fd, &lrp)) != 0) - return (ret); - - /* Region exists; now initialize it. 
*/ - lrp->table_size = __db_tablesize(maxlocks); - lrp->magic = DB_LOCKMAGIC; - lrp->version = DB_LOCKVERSION; - lrp->id = 0; - lrp->maxlocks = maxlocks; - lrp->need_dd = 0; - lrp->detect = DB_LOCK_NORUN; - lrp->numobjs = maxlocks; - lrp->nlockers = 0; - lrp->mem_bytes = ALIGN(STRING_SIZE(maxlocks), sizeof(size_t)); - lrp->increment = lrp->hdr.size / 2; - lrp->nmodes = lock_modes; - lrp->nconflicts = 0; - lrp->nrequests = 0; - lrp->nreleases = 0; - lrp->ndeadlocks = 0; - - /* - * As we write the region, we've got to maintain the alignment - * for the structures that follow each chunk. This information - * ends up being encapsulated both in here as well as in the - * lock.h file for the XXX_SIZE macros. - */ - /* Initialize conflict matrix. */ - curaddr = (u_int8_t *)lrp + sizeof(DB_LOCKREGION); - memcpy(curaddr, conflicts, lock_modes * lock_modes); - curaddr += lock_modes * lock_modes; - - /* - * Initialize hash table. - */ - curaddr = (u_int8_t *)ALIGNP(curaddr, LOCK_HASH_ALIGN); - lrp->hash_off = curaddr - (u_int8_t *)lrp; - nelements = lrp->table_size; - __db_hashinit(curaddr, nelements); - curaddr += nelements * sizeof(DB_HASHTAB); - - /* - * Initialize locks onto a free list. Since locks contains mutexes, - * we need to make sure that each lock is aligned on a MUTEX_ALIGNMENT - * boundary. - */ - curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT); - tq_head = &lrp->free_locks; - SH_TAILQ_INIT(tq_head); - - for (i = 0; i++ < maxlocks; - curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) { - lp = (struct __db_lock *)curaddr; - lp->status = DB_LSTAT_FREE; - SH_TAILQ_INSERT_HEAD(tq_head, lp, links, __db_lock); - } - - /* Initialize objects onto a free list. */ - obj_head = &lrp->free_objs; - SH_TAILQ_INIT(obj_head); - - for (i = 0; i++ < maxlocks; curaddr += sizeof(DB_LOCKOBJ)) { - op = (DB_LOCKOBJ *)curaddr; - SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj); - } - - /* - * Initialize the string space; as for all shared memory allocation - * regions, this requires size_t alignment, since we store the - * lengths of malloc'd areas in the area.. - */ - curaddr = (u_int8_t *)ALIGNP(curaddr, sizeof(size_t)); - lrp->mem_off = curaddr - (u_int8_t *)lrp; - __db_shalloc_init(curaddr, lrp->mem_bytes); - - /* Release the lock. */ - (void)__db_mutex_unlock(&lrp->hdr.lock, fd); - - /* Now unmap the region. */ - if ((ret = __db_rclose(dbenv, fd, lrp)) != 0) { - (void)lock_unlink(path, 1 /* force */, dbenv); - return (ret); - } - - return (0); -} int -lock_open(path, flags, mode, dbenv, ltp) - const char *path; - int flags, mode; - DB_ENV *dbenv; - DB_LOCKTAB **ltp; -{ - DB_LOCKTAB *lt; - int ret, retry_cnt; - - /* Validate arguments. */ -#ifdef HAVE_SPINLOCKS -#define OKFLAGS (DB_CREATE | DB_THREAD) -#else -#define OKFLAGS (DB_CREATE) -#endif - if ((ret = __db_fchk(dbenv, "lock_open", flags, OKFLAGS)) != 0) - return (ret); - - /* - * Create the lock table structure. - */ - if ((lt = (DB_LOCKTAB *)__db_calloc(1, sizeof(DB_LOCKTAB))) == NULL) { - __db_err(dbenv, "%s", strerror(ENOMEM)); - return (ENOMEM); - } - lt->dbenv = dbenv; - - /* - * Now, create the lock region if it doesn't already exist. - */ - retry_cnt = 0; -retry: if (LF_ISSET(DB_CREATE) && - (ret = __lock_create(path, mode, dbenv)) != 0) - if (ret == EAGAIN && ++retry_cnt < 3) { - (void)__db_sleep(1, 0); - goto retry; - } else if (ret == EEXIST) /* We did not create the region */ - LF_CLR(DB_CREATE); - else - goto out; - - /* - * Finally, open the region, map it in, and increment the - * reference count. 
- */ - retry_cnt = 0; -retry1: if ((ret = __db_ropen(dbenv, DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, - LF_ISSET(~(DB_CREATE | DB_THREAD)), <->fd, <->region)) != 0) { - if (ret == EAGAIN && ++retry_cnt < 3) { - (void)__db_sleep(1, 0); - goto retry1; - } - goto out; - } - - if (lt->region->magic != DB_LOCKMAGIC) { - __db_err(dbenv, "lock_open: Bad magic number"); - ret = EINVAL; - goto out; - } - - /* Check for automatic deadlock detection. */ - if (dbenv->lk_detect != DB_LOCK_NORUN) { - if (lt->region->detect != DB_LOCK_NORUN && - dbenv->lk_detect != DB_LOCK_DEFAULT && - lt->region->detect != dbenv->lk_detect) { - __db_err(dbenv, - "lock_open: incompatible deadlock detector mode"); - ret = EINVAL; - goto out; - } - if (lt->region->detect == DB_LOCK_NORUN) - lt->region->detect = dbenv->lk_detect; - } - - /* Set up remaining pointers into region. */ - lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION); - lt->hashtab = - (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off); - lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off); - lt->reg_size = lt->region->hdr.size; - - *ltp = lt; - return (0); - -/* Error handling. */ -out: if (lt->region != NULL) - (void)__db_rclose(lt->dbenv, lt->fd, lt->region); - if (LF_ISSET(DB_CREATE)) - (void)lock_unlink(path, 1, lt->dbenv); - __db_free(lt); - return (ret); -} - -int -lock_id (lt, idp) +lock_id(lt, idp) DB_LOCKTAB *lt; u_int32_t *idp; { @@ -294,8 +54,8 @@ lock_id (lt, idp) int lock_vec(lt, locker, flags, list, nlist, elistp) DB_LOCKTAB *lt; - u_int32_t locker; - int flags, nlist; + u_int32_t locker, flags; + int nlist; DB_LOCKREQ *list, **elistp; { struct __db_lock *lp; @@ -345,7 +105,7 @@ lock_vec(lt, locker, flags, list, nlist, elistp) for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock); lp != NULL; lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) { - if ((ret = __lock_put_internal(lt, lp, 0)) != 0) + if ((ret = __lock_put_internal(lt, lp, 1)) != 0) break; } __lock_freeobj(lt, sh_locker); @@ -436,8 +196,7 @@ lock_vec(lt, locker, flags, list, nlist, elistp) int lock_get(lt, locker, flags, obj, lock_mode, lock) DB_LOCKTAB *lt; - u_int32_t locker; - int flags; + u_int32_t locker, flags; const DBT *obj; db_lockmode_t lock_mode; DB_LOCK *lock; @@ -496,35 +255,6 @@ lock_put(lt, lock) return (ret); } -int -lock_close(lt) - DB_LOCKTAB *lt; -{ - int ret; - - if ((ret = __db_rclose(lt->dbenv, lt->fd, lt->region)) != 0) - return (ret); - - /* Free lock table. */ - __db_free(lt); - return (0); -} - -int -lock_unlink(path, force, dbenv) - const char *path; - int force; - DB_ENV *dbenv; -{ - return (__db_runlink(dbenv, - DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, force)); -} - -/* - * XXX This looks like it could be void, but I'm leaving it returning - * an int because I think it will have to when we go through and add - * the appropriate error checking for the EINTR on mutexes. - */ static int __lock_put_internal(lt, lockp, do_all) DB_LOCKTAB *lt; @@ -593,7 +323,7 @@ __lock_put_internal(lt, lockp, do_all) SH_TAILQ_INSERT_TAIL(&sh_obj->holders, lp_w, links); /* Wake up waiter. 
*/ - (void)__db_mutex_unlock(&lp_w->mutex, lt->fd); + (void)__db_mutex_unlock(&lp_w->mutex, lt->reginfo.fd); state_changed = 1; } @@ -626,8 +356,7 @@ __lock_put_internal(lt, lockp, do_all) static int __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp) DB_LOCKTAB *lt; - u_int32_t locker; - int flags; + u_int32_t locker, flags; const DBT *obj; db_lockmode_t lock_mode; struct __db_lock **lockp; @@ -741,7 +470,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp) */ (void)__db_mutex_init(&newl->mutex, MUTEX_LOCK_OFFSET(lt->region, &newl->mutex)); - (void)__db_mutex_lock(&newl->mutex, lt->fd); + (void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd); /* * Now, insert the lock onto its locker's list. @@ -772,7 +501,7 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp) if (lrp->detect != DB_LOCK_NORUN) ret = lock_detect(lt, 0, lrp->detect); - (void)__db_mutex_lock(&newl->mutex, lt->fd); + (void)__db_mutex_lock(&newl->mutex, lt->reginfo.fd); LOCK_LOCKREGION(lt); if (newl->status != DB_LSTAT_PENDING) { @@ -802,306 +531,6 @@ __lock_get_internal(lt, locker, flags, obj, lock_mode, lockp) } /* - * This is called at every interface to verify if the region - * has changed size, and if so, to remap the region in and - * reset the process pointers. - */ -static int -__lock_validate_region(lt) - DB_LOCKTAB *lt; -{ - int ret; - - if (lt->reg_size == lt->region->hdr.size) - return (0); - - /* Grow the region. */ - if ((ret = __db_rremap(lt->dbenv, lt->region, - lt->reg_size, lt->region->hdr.size, lt->fd, <->region)) != 0) - return (ret); - - __lock_reset_region(lt); - - return (0); -} - -/* - * We have run out of space; time to grow the region. - */ -static int -__lock_grow_region(lt, which, howmuch) - DB_LOCKTAB *lt; - int which; - size_t howmuch; -{ - struct __db_lock *newl; - struct lock_header *lock_head; - struct obj_header *obj_head; - DB_LOCKOBJ *op; - DB_LOCKREGION *lrp; - float lock_ratio, obj_ratio; - size_t incr, oldsize, used; - u_int32_t i, newlocks, newmem, newobjs; - int ret, usedlocks, usedmem, usedobjs; - u_int8_t *curaddr; - - lrp = lt->region; - oldsize = lrp->hdr.size; - incr = lrp->increment; - - /* Figure out how much of each sort of space we have. */ - usedmem = lrp->mem_bytes - __db_shalloc_count(lt->mem); - usedobjs = lrp->numobjs - __lock_count_objs(lrp); - usedlocks = lrp->maxlocks - __lock_count_locks(lrp); - - /* - * Figure out what fraction of the used space belongs to each - * different type of "thing" in the region. Then partition the - * new space up according to this ratio. - */ - used = usedmem + - usedlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) + - usedobjs * sizeof(DB_LOCKOBJ); - - lock_ratio = usedlocks * - ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT) / (float)used; - obj_ratio = usedobjs * sizeof(DB_LOCKOBJ) / (float)used; - - newlocks = (u_int32_t)(lock_ratio * - incr / ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)); - newobjs = (u_int32_t)(obj_ratio * incr / sizeof(DB_LOCKOBJ)); - newmem = incr - - (newobjs * sizeof(DB_LOCKOBJ) + - newlocks * ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)); - - /* - * Make sure we allocate enough memory for the object being - * requested. 
- */ - switch (which) { - case DB_LOCK_LOCK: - if (newlocks == 0) { - newlocks = 10; - incr += newlocks * sizeof(struct __db_lock); - } - break; - case DB_LOCK_OBJ: - if (newobjs == 0) { - newobjs = 10; - incr += newobjs * sizeof(DB_LOCKOBJ); - } - break; - case DB_LOCK_MEM: - if (newmem < howmuch * 2) { - incr += howmuch * 2 - newmem; - newmem = howmuch * 2; - } - break; - } - - newmem += ALIGN(incr, sizeof(size_t)) - incr; - incr = ALIGN(incr, sizeof(size_t)); - - /* - * Since we are going to be allocating locks at the beginning of the - * new chunk, we need to make sure that the chunk is MUTEX_ALIGNMENT - * aligned. We did not guarantee this when we created the region, so - * we may need to pad the old region by extra bytes to ensure this - * alignment. - */ - incr += ALIGN(oldsize, MUTEX_ALIGNMENT) - oldsize; - - __db_err(lt->dbenv, - "Growing lock region: %lu locks %lu objs %lu bytes", - (u_long)newlocks, (u_long)newobjs, (u_long)newmem); - - if ((ret = __db_rgrow(lt->dbenv, lt->fd, incr)) != 0) - return (ret); - if ((ret = __db_rremap(lt->dbenv, - lt->region, oldsize, oldsize + incr, lt->fd, <->region)) != 0) - return (ret); - __lock_reset_region(lt); - - /* Update region parameters. */ - lrp = lt->region; - lrp->increment = incr << 1; - lrp->maxlocks += newlocks; - lrp->numobjs += newobjs; - lrp->mem_bytes += newmem; - - curaddr = (u_int8_t *)lrp + oldsize; - curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT); - - /* Put new locks onto the free list. */ - lock_head = &lrp->free_locks; - for (i = 0; i++ < newlocks; - curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) { - newl = (struct __db_lock *)curaddr; - SH_TAILQ_INSERT_HEAD(lock_head, newl, links, __db_lock); - } - - /* Put new objects onto the free list. */ - obj_head = &lrp->free_objs; - for (i = 0; i++ < newobjs; curaddr += sizeof(DB_LOCKOBJ)) { - op = (DB_LOCKOBJ *)curaddr; - SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj); - } - - *((size_t *)curaddr) = newmem - sizeof(size_t); - curaddr += sizeof(size_t); - __db_shalloc_free(lt->mem, curaddr); - - return (0); -} - -#ifdef DEBUG -/* - * __lock_dump_region -- - * - * PUBLIC: void __lock_dump_region __P((DB_LOCKTAB *, u_int)); - */ -void -__lock_dump_region(lt, flags) - DB_LOCKTAB *lt; - u_int flags; -{ - struct __db_lock *lp; - DB_LOCKOBJ *op; - DB_LOCKREGION *lrp; - u_int32_t i, j; - - lrp = lt->region; - - printf("Lock region parameters\n"); - printf("%s:0x%x\t%s:%lu\t%s:%lu\t%s:%lu\n%s:%lu\t%s:%lu\t%s:%lu\t\n", - "magic ", lrp->magic, - "version ", (u_long)lrp->version, - "processes ", (u_long)lrp->hdr.refcnt, - "maxlocks ", (u_long)lrp->maxlocks, - "table size ", (u_long)lrp->table_size, - "nmodes ", (u_long)lrp->nmodes, - "numobjs ", (u_long)lrp->numobjs); - printf("%s:%lu\t%s:%lu\t%s:%lu\n%s:%lu\t%s:%lu\t%s:%lu\n", - "size ", (u_long)lrp->hdr.size, - "nlockers ", (u_long)lrp->nlockers, - "hash_off ", (u_long)lrp->hash_off, - "increment ", (u_long)lrp->increment, - "mem_off ", (u_long)lrp->mem_off, - "mem_bytes ", (u_long)lrp->mem_bytes); -#ifndef HAVE_SPINLOCKS - printf("Mutex: off %lu", (u_long)lrp->hdr.lock.off); -#endif - printf(" waits %lu nowaits %lu", - (u_long)lrp->hdr.lock.mutex_set_wait, - (u_long)lrp->hdr.lock.mutex_set_nowait); - printf("\n%s:%lu\t%s:%lu\t%s:%lu\t%s:%lu\n", - "nconflicts ", (u_long)lrp->nconflicts, - "nrequests ", (u_long)lrp->nrequests, - "nreleases ", (u_long)lrp->nreleases, - "ndeadlocks ", (u_long)lrp->ndeadlocks); - printf("need_dd %lu\n", (u_long)lrp->need_dd); - if (flags & LOCK_DEBUG_CONF) { - printf("\nConflict 
matrix\n"); - - for (i = 0; i < lrp->nmodes; i++) { - for (j = 0; j < lrp->nmodes; j++) - printf("%lu\t", - (u_long)lt->conflicts[i * lrp->nmodes + j]); - printf("\n"); - } - } - - for (i = 0; i < lrp->table_size; i++) { - op = SH_TAILQ_FIRST(<->hashtab[i], __db_lockobj); - if (op != NULL && flags & LOCK_DEBUG_BUCKET) - printf("Bucket %lu:\n", (unsigned long)i); - while (op != NULL) { - if (op->type == DB_LOCK_LOCKER && - flags & LOCK_DEBUG_LOCKERS) - __lock_dump_locker(lt, op); - else if (flags & LOCK_DEBUG_OBJECTS && - op->type == DB_LOCK_OBJTYPE) - __lock_dump_object(lt, op); - op = SH_TAILQ_NEXT(op, links, __db_lockobj); - } - } - - if (flags & LOCK_DEBUG_LOCK) { - printf("\nLock Free List\n"); - for (lp = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock); - lp != NULL; - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) { - printf("0x%x: %lu\t%lu\t%lu\t0x%x\n", (u_int)lp, - (u_long)lp->holder, (u_long)lp->mode, - (u_long)lp->status, (u_int)lp->obj); - } - } - - if (flags & LOCK_DEBUG_LOCK) { - printf("\nObject Free List\n"); - for (op = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj); - op != NULL; - op = SH_TAILQ_NEXT(op, links, __db_lockobj)) - printf("0x%x\n", (u_int)op); - } - - if (flags & LOCK_DEBUG_MEM) { - printf("\nMemory Free List\n"); - __db_shalloc_dump(stdout, lt->mem); - } -} - -static void -__lock_dump_locker(lt, op) - DB_LOCKTAB *lt; - DB_LOCKOBJ *op; -{ - struct __db_lock *lp; - u_int32_t locker; - void *ptr; - - ptr = SH_DBT_PTR(&op->lockobj); - memcpy(&locker, ptr, sizeof(u_int32_t)); - printf("L %lx", (u_long)locker); - - lp = SH_LIST_FIRST(&op->heldby, __db_lock); - if (lp == NULL) { - printf("\n"); - return; - } - for (; lp != NULL; lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) - __lock_printlock(lt, lp, 0); -} - -static void -__lock_dump_object(lt, op) - DB_LOCKTAB *lt; - DB_LOCKOBJ *op; -{ - struct __db_lock *lp; - u_int32_t j; - char *ptr; - - ptr = SH_DBT_PTR(&op->lockobj); - for (j = 0; j < op->lockobj.size; ptr++, j++) - printf("%c", (int)*ptr); - printf("\n"); - - printf("H:"); - for (lp = - SH_TAILQ_FIRST(&op->holders, __db_lock); - lp != NULL; - lp = SH_TAILQ_NEXT(lp, links, __db_lock)) - __lock_printlock(lt, lp, 0); - lp = SH_TAILQ_FIRST(&op->waiters, __db_lock); - if (lp != NULL) { - printf("\nW:"); - for (; lp != NULL; lp = SH_TAILQ_NEXT(lp, links, __db_lock)) - __lock_printlock(lt, lp, 0); - } -} - -/* * __lock_is_locked -- * * PUBLIC: int __lock_is_locked @@ -1136,7 +565,12 @@ __lock_is_locked(lt, locker, dbt, mode) return (0); } -static void +/* + * __lock_printlock -- + * + * PUBLIC: void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int)); + */ +void __lock_printlock(lt, lp, ispgno) DB_LOCKTAB *lt; struct __db_lock *lp; @@ -1213,39 +647,6 @@ __lock_printlock(lt, lp, ispgno) printf("\n"); } } -#endif - -static int -__lock_count_locks(lrp) - DB_LOCKREGION *lrp; -{ - struct __db_lock *newl; - int count; - - count = 0; - for (newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock); - newl != NULL; - newl = SH_TAILQ_NEXT(newl, links, __db_lock)) - count++; - - return (count); -} - -static int -__lock_count_objs(lrp) - DB_LOCKREGION *lrp; -{ - DB_LOCKOBJ *obj; - int count; - - count = 0; - for (obj = SH_TAILQ_FIRST(&lrp->free_objs, __db_lockobj); - obj != NULL; - obj = SH_TAILQ_NEXT(obj, links, __db_lockobj)) - count++; - - return (count); -} /* * PUBLIC: int __lock_getobj __P((DB_LOCKTAB *, @@ -1354,19 +755,7 @@ __lock_remove_waiter(lt, sh_obj, lockp, status) lockp->status = status; /* Wake whoever is waiting on this lock. 
*/ - (void)__db_mutex_unlock(&lockp->mutex, lt->fd); -} - -static void -__lock_freeobj(lt, obj) - DB_LOCKTAB *lt; - DB_LOCKOBJ *obj; -{ - HASHREMOVE_EL(lt->hashtab, - __db_lockobj, links, obj, lt->region->table_size, __lock_lhash); - if (obj->lockobj.size > sizeof(obj->objdata)) - __db_shalloc_free(lt->mem, SH_DBT_PTR(&obj->lockobj)); - SH_TAILQ_INSERT_HEAD(&lt->region->free_objs, obj, links, __db_lockobj); + (void)__db_mutex_unlock(&lockp->mutex, lt->reginfo.fd); } static void @@ -1384,17 +773,18 @@ __lock_checklocker(lt, lockp, do_remove) if (__lock_getobj(lt, lockp->holder, NULL, DB_LOCK_LOCKER, &sh_locker) == 0 && SH_LIST_FIRST(&sh_locker->heldby, __db_lock) == NULL) { __lock_freeobj(lt, sh_locker); - lt->region->nlockers--; + lt->region->nlockers--; } } static void -__lock_reset_region(lt) +__lock_freeobj(lt, obj) DB_LOCKTAB *lt; + DB_LOCKOBJ *obj; { - lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION); - lt->hashtab = - (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off); - lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off); - lt->reg_size = lt->region->hdr.size; + HASHREMOVE_EL(lt->hashtab, + __db_lockobj, links, obj, lt->region->table_size, __lock_lhash); + if (obj->lockobj.size > sizeof(obj->objdata)) + __db_shalloc_free(lt->mem, SH_DBT_PTR(&obj->lockobj)); + SH_TAILQ_INSERT_HEAD(&lt->region->free_objs, obj, links, __db_lockobj); }
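The removed __lock_create noted that, while writing the region, it had to maintain alignment for the structures following each chunk: conflict matrix, hash table, mutex-aligned lock free list, object free list, and size_t-aligned string space. The standalone sketch below walks the same carving arithmetic; every constant in it is a stand-in (for MUTEX_ALIGNMENT, LOCK_HASH_ALIGN, the structure sizes), not a value taken from db2.

#include <stdio.h>
#include <stddef.h>

/* Round v up to a power-of-two boundary (stand-in for db2's ALIGN). */
#define ALIGN(v, bound)	(((v) + (bound) - 1) & ~((size_t)(bound) - 1))

int
main(void)
{
	size_t nmodes = 6, maxlocks = 1000, table_size = 1031;
	size_t mutex_alignment = 16;		/* stand-in for MUTEX_ALIGNMENT */
	size_t hash_align = sizeof(void *);	/* stand-in for LOCK_HASH_ALIGN */
	size_t hash_entry = sizeof(void *);	/* stand-in for sizeof(DB_HASHTAB) */
	size_t lock_size = 64, obj_size = 48;	/* stand-ins for the struct sizes */
	size_t off = 128;			/* stand-in for sizeof(DB_LOCKREGION) */

	/* Conflict matrix: nmodes * nmodes bytes, no special alignment. */
	printf("conflict matrix at  %zu\n", off);
	off += nmodes * nmodes;

	/* Hash table: align to the hash boundary first. */
	off = ALIGN(off, hash_align);
	printf("hash table at       %zu\n", off);
	off += table_size * hash_entry;

	/* Locks contain mutexes, so each one must be mutex-aligned. */
	off = ALIGN(off, mutex_alignment);
	printf("lock free list at   %zu\n", off);
	off += maxlocks * ALIGN(lock_size, mutex_alignment);

	/* Lock objects need no alignment beyond their natural size. */
	printf("object free list at %zu\n", off);
	off += maxlocks * obj_size;

	/* String space: the shared allocator stores size_t lengths inline. */
	off = ALIGN(off, sizeof(size_t));
	printf("string space at     %zu\n", off);
	printf("total so far        %zu bytes\n", off);
	return (0);
}

Keeping the mutex-bearing locks on their own aligned run is the same constraint the removed __lock_grow_region code preserved when it padded the old region to a MUTEX_ALIGNMENT boundary before appending new space.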