/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1996, 1997
 *	Sleepycat Software.  All rights reserved.
 *
 *	@(#)mp.h	10.25 (Sleepycat) 1/8/98
 */

struct __bh;		typedef struct __bh BH;
struct __db_mpreg;	typedef struct __db_mpreg DB_MPREG;
struct __mpool;		typedef struct __mpool MPOOL;
struct __mpoolfile;	typedef struct __mpoolfile MPOOLFILE;

					/* Default mpool name. */
#define	DB_DEFAULT_MPOOL_FILE	"__db_mpool.share"

/*
 * We default to 128K (16 8K pages) if the user doesn't specify, and
 * require a minimum of 20K.
 */
#define	DB_CACHESIZE_DEF	(128 * 1024)
#define	DB_CACHESIZE_MIN	( 20 * 1024)
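
/*
 * Illustrative sketch (not part of this header): one way an application-
 * supplied cache size might be reconciled with the limits above.  The
 * helper name and the clamping policy are hypothetical; the real code may
 * instead reject undersized values.
 */
#ifdef MP_EXAMPLE_SKETCHES		/* Hypothetical guard; never built. */
static size_t
__mp_choose_cachesize(requested)
	size_t requested;
{
	if (requested == 0)		/* Caller took the default. */
		return (DB_CACHESIZE_DEF);
	if (requested < DB_CACHESIZE_MIN)
		return (DB_CACHESIZE_MIN);
	return (requested);
}
#endif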

#define	INVALID		0		/* Invalid shared memory offset. */

/*
 * There are three ways we do locking in the mpool code:
 *
 * Locking a handle mutex to provide concurrency for DB_THREAD operations.
 * Locking the region mutex to provide mutual exclusion while reading and
 *    writing structures in the shared region.
 * Locking buffer header mutexes during I/O.
 *
 * The first will not be further described here.  We use the shared mpool
 * region lock to provide mutual exclusion while reading/modifying all of
 * the data structures, including the buffer headers.  We use a per-buffer
 * header lock to wait on buffer I/O.  The order of locking is as follows:
 *
 * Searching for a buffer:
 *	Acquire the region lock.
 *	Find the buffer header.
 *	Increment the reference count (guarantee the buffer stays).
 *	While the BH_LOCKED flag is set (I/O is going on) {
 *	    Release the region lock.
 *		Explicitly yield the processor if this is not the first pass
 *		through the loop; otherwise we can simply spin, since we
 *		would just be switching between the two locks.
 *	    Request the buffer lock.
 *	    The I/O will complete...
 *	    Acquire the buffer lock.
 *	    Release the buffer lock.
 *	    Acquire the region lock.
 *	}
 *	Return the buffer.
 *
 * Reading/writing a buffer:
 *	Acquire the region lock.
 *	Find/create the buffer header.
 *	If reading, increment the reference count (guarantee the buffer stays).
 *	Set the BH_LOCKED flag.
 *	Acquire the buffer lock (guaranteed not to block).
 *	Release the region lock.
 *	Do the I/O and/or initialize the buffer contents.
 *	Release the buffer lock.
 *	    At this point, the buffer lock is available, but the logical
 *	    operation (flagged by BH_LOCKED) is not yet completed.  For
 *	    this reason, among others, threads checking the BH_LOCKED flag
 *	    must loop around their test.
 *	Acquire the region lock.
 *	Clear the BH_LOCKED flag.
 *	Release the region lock.
 *	Return/discard the buffer.
 *
 * Pointers to DB_MPOOL, MPOOL, DB_MPOOLFILE and MPOOLFILE structures are not
 * reacquired when a region lock is reacquired because they couldn't have been
 * closed/discarded and because they never move in memory.
 */
#define	LOCKINIT(dbmp, mutexp)						\
	if (F_ISSET(dbmp, MP_LOCKHANDLE | MP_LOCKREGION))		\
		(void)__db_mutex_init(mutexp,				\
		    MUTEX_LOCK_OFFSET((dbmp)->maddr, mutexp))

#define	LOCKHANDLE(dbmp, mutexp)					\
	if (F_ISSET(dbmp, MP_LOCKHANDLE))				\
		(void)__db_mutex_lock(mutexp, (dbmp)->fd)
#define	UNLOCKHANDLE(dbmp, mutexp)					\
	if (F_ISSET(dbmp, MP_LOCKHANDLE))				\
		(void)__db_mutex_unlock(mutexp, (dbmp)->fd)

#define	LOCKREGION(dbmp)						\
	if (F_ISSET(dbmp, MP_LOCKREGION))				\
		(void)__db_mutex_lock(&((RLAYOUT *)(dbmp)->mp)->lock,	\
		    (dbmp)->fd)
#define	UNLOCKREGION(dbmp)						\
	if (F_ISSET(dbmp, MP_LOCKREGION))				\
		(void)__db_mutex_unlock(&((RLAYOUT *)(dbmp)->mp)->lock,	\
		(dbmp)->fd)

#define	LOCKBUFFER(dbmp, bhp)						\
	if (F_ISSET(dbmp, MP_LOCKREGION))				\
		(void)__db_mutex_lock(&(bhp)->mutex, (dbmp)->fd)
#define	UNLOCKBUFFER(dbmp, bhp)						\
	if (F_ISSET(dbmp, MP_LOCKREGION))				\
		(void)__db_mutex_unlock(&(bhp)->mutex, (dbmp)->fd)
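
/*
 * Illustrative sketch (not part of this header): the "searching for a
 * buffer" protocol described above, expressed with the macros defined
 * here.  The function is a hypothetical stand-in; the caller is assumed
 * to hold the region lock and to have already located the buffer header.
 */
#ifdef MP_EXAMPLE_SKETCHES		/* Hypothetical guard; never built. */
static void
__mp_wait_for_buffer(dbmp, bhp)
	DB_MPOOL *dbmp;
	BH *bhp;
{
	int first;

	/* Guarantee the buffer isn't discarded while we wait. */
	++bhp->ref;

	for (first = 1; F_ISSET(bhp, BH_LOCKED); first = 0) {
		/* Drop the region lock so the I/O thread can finish. */
		UNLOCKREGION(dbmp);

		if (!first) {
			/*
			 * Explicitly yield the processor here; the yield
			 * primitive is platform-specific and omitted from
			 * this sketch.
			 */
		}

		/* Block until the I/O completes, then release the lock. */
		LOCKBUFFER(dbmp, bhp);
		UNLOCKBUFFER(dbmp, bhp);

		/* Retake the region lock before re-testing BH_LOCKED. */
		LOCKREGION(dbmp);
	}
	/* The caller now owns a referenced buffer whose I/O is complete. */
}
#endif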

/*
 * DB_MPOOL --
 *	Per-process memory pool structure.
 */
struct __db_mpool {
/* These fields need to be protected for multi-threaded support. */
	db_mutex_t	*mutexp;	/* Structure lock. */

					/* List of pgin/pgout routines. */
	LIST_HEAD(__db_mpregh, __db_mpreg) dbregq;

					/* List of DB_MPOOLFILE's. */
	TAILQ_HEAD(__db_mpoolfileh, __db_mpoolfile) dbmfq;

/* These fields are not protected. */
	DB_ENV     *dbenv;		/* Reference to error information. */

	MPOOL	   *mp;			/* Address of the shared MPOOL. */

	void	   *maddr;		/* Address of mmap'd region. */
	void	   *addr;		/* Address of shalloc() region. */

	DB_HASHTAB *htab;		/* Hash table of bucket headers. */

	int	    fd;			/* Underlying mmap'd fd. */

#define	MP_ISPRIVATE	0x01		/* Private, so local memory. */
#define	MP_LOCKHANDLE	0x02		/* Threaded, lock handles and region. */
#define	MP_LOCKREGION	0x04		/* Concurrent access, lock region. */
#define	MP_MALLOC	0x08		/* Region is in malloc'd memory. */
	u_int32_t  flags;
};

/*
 * DB_MPREG --
 *	DB_MPOOL registry of pgin/pgout functions.
 */
struct __db_mpreg {
	LIST_ENTRY(__db_mpreg) q;	/* Linked list. */

	int ftype;			/* File type. */
					/* Pgin, pgout routines. */
	int (*pgin) __P((db_pgno_t, void *, DBT *));
	int (*pgout) __P((db_pgno_t, void *, DBT *));
};
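
/*
 * Illustrative sketch (not part of this header): a page-input callback
 * with the signature this registry stores.  The function name and its
 * (empty) conversion logic are hypothetical; a real callback would use
 * the cookie DBT, for example a stored byte-order tag, to convert the
 * on-disk page format in place before the access methods see it.
 */
#ifdef MP_EXAMPLE_SKETCHES		/* Hypothetical guard; never built. */
static int
__mp_example_pgin(pgno, pgaddr, pgcookie)
	db_pgno_t pgno;
	void *pgaddr;
	DBT *pgcookie;
{
	/* This sketch leaves the page untouched and reports success. */
	return (0);
}
#endif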

/*
 * DB_MPOOLFILE --
 *	Per-process DB_MPOOLFILE information.
 */
struct __db_mpoolfile {
/* These fields need to be protected for multi-threaded support. */
	db_mutex_t	*mutexp;	/* Structure lock. */

	int	   fd;			/* Underlying file descriptor. */

	u_int32_t pinref;		/* Pinned block reference count. */

/* These fields are not protected. */
	TAILQ_ENTRY(__db_mpoolfile) q;	/* Linked list of DB_MPOOLFILE's. */

	DB_MPOOL  *dbmp;		/* Overlying DB_MPOOL. */
	MPOOLFILE *mfp;			/* Underlying MPOOLFILE. */

	void	  *addr;		/* Address of mmap'd region. */
	size_t	   len;			/* Length of mmap'd region. */

/* These fields need to be protected for multi-threaded support. */
#define	MP_READONLY	0x01		/* File is readonly. */
#define	MP_UPGRADE	0x02		/* File descriptor is readwrite. */
#define	MP_UPGRADE_FAIL	0x04		/* Upgrade wasn't possible. */
	u_int32_t  flags;
};

/*
 * MPOOL --
 *	Shared memory pool region.  One of these is allocated in shared
 *	memory, and describes the pool.
 */
struct __mpool {
	RLAYOUT	    rlayout;		/* General region information. */

	SH_TAILQ_HEAD(__bhq) bhq;	/* LRU list of buffer headers. */
	SH_TAILQ_HEAD(__bhfq) bhfq;	/* Free buffer headers. */
	SH_TAILQ_HEAD(__mpfq) mpfq;	/* List of MPOOLFILEs. */

	/*
	 * We make the assumption that the early pages of the file are far
	 * more likely to be retrieved than the later pages, which means
	 * that the top bits are more interesting for hashing since they're
	 * less likely to collide.  On the other hand, since 512 4K pages
	 * represents a 2MB file, only the bottom 9 bits of the page number
	 * are likely to be set.  We XOR in the offset in the MPOOL of the
	 * MPOOLFILE that backs this particular page, since that should also
	 * be unique for the page.  (An illustrative sketch of the macro's
	 * inputs follows this structure definition.)
	 */
#define	BUCKET(mp, mf_offset, pgno)					\
	(((pgno) ^ ((mf_offset) << 9)) % (mp)->htab_buckets)

	size_t	    htab;		/* Hash table offset. */
	size_t	    htab_buckets;	/* Number of hash table entries. */

	DB_LSN	    lsn;		/* Maximum checkpoint LSN. */
	int	    lsn_cnt;		/* Checkpoint buffers left to write. */

	DB_MPOOL_STAT stat;		/* Global mpool statistics. */

#define	MP_LSN_RETRY	0x01		/* Retry all BH_WRITE buffers. */
	u_int32_t  flags;
};
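
/*
 * Illustrative sketch (not part of this header): the inputs to the BUCKET
 * macro above.  The helper and its argument names are hypothetical;
 * mf_offset is the byte offset of the backing MPOOLFILE within the shared
 * region, so shifting it left by 9 bits moves it past the page-number bits
 * that are typically set for small files.
 */
#ifdef MP_EXAMPLE_SKETCHES		/* Hypothetical guard; never built. */
static size_t
__mp_bucket(mp, mf_offset, pgno)
	MPOOL *mp;
	size_t mf_offset;
	db_pgno_t pgno;
{
	/*
	 * For example, page 1 of the file whose MPOOLFILE sits at region
	 * offset 0x400 hashes to ((1 ^ (0x400 << 9)) % htab_buckets).
	 */
	return (BUCKET(mp, mf_offset, pgno));
}
#endif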

/*
 * MPOOLFILE --
 *	Shared DB_MPOOLFILE information.
 */
struct __mpoolfile {
	SH_TAILQ_ENTRY  q;		/* List of MPOOLFILEs */

	u_int32_t ref;			/* Reference count. */

	int	  ftype;		/* File type. */
	int	  lsn_off;		/* Page's LSN offset. */

	size_t	  path_off;		/* File name location. */
	size_t	  fileid_off;		/* File identification location. */

	size_t	  pgcookie_len;		/* Pgin/pgout cookie length. */
	size_t	  pgcookie_off;		/* Pgin/pgout cookie location. */

	int	  lsn_cnt;		/* Checkpoint buffers left to write. */

	db_pgno_t last_pgno;		/* Last page in the file. */

#define	MP_CAN_MMAP	0x01		/* If the file can be mmap'd. */
#define	MP_TEMP		0x02		/* Backing file is a temporary. */
	u_int32_t  flags;

	DB_MPOOL_FSTAT stat;		/* Per-file mpool statistics. */
};

/*
 * BH --
 *	Buffer header.
 */
struct __bh {
	db_mutex_t	mutex;		/* Structure lock. */

	u_int16_t	ref;		/* Reference count. */

#define	BH_CALLPGIN	0x001		/* Call the pgin function before use. */
#define	BH_DIRTY	0x002		/* Page was modified. */
#define	BH_DISCARD	0x004		/* Page is useless. */
#define	BH_LOCKED	0x008		/* Page is locked (I/O in progress). */
#define	BH_TRASH	0x010		/* Page is garbage. */
#define	BH_WRITE	0x020		/* Page scheduled for writing. */
	u_int16_t  flags;

	SH_TAILQ_ENTRY	q;		/* LRU queue. */
	SH_TAILQ_ENTRY	hq;		/* MPOOL hash bucket queue. */

	db_pgno_t pgno;			/* Underlying MPOOLFILE page number. */
	size_t	  mf_offset;		/* Associated MPOOLFILE offset. */

	/*
	 * !!!
	 * This array must be size_t aligned -- the DB access methods put PAGE
	 * and other structures into it, and expect to be able to access them
	 * directly.  (We guarantee size_t alignment in the db_mpool(3) manual
	 * page as well.)  A sizing sketch follows this structure definition.
	 */
	u_int8_t   buf[1];		/* Variable length data. */
};
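
/*
 * Illustrative sketch (not part of this header): how the variable-length
 * buf[] trick above is typically sized.  The helper is hypothetical and
 * the real allocation code may compute the size differently, but the idea
 * is that a buffer header is allocated as the fixed BH prefix plus one
 * full page, so bhp->buf is where the page data lives.
 */
#ifdef MP_EXAMPLE_SKETCHES		/* Hypothetical guard; never built. */
#include <stddef.h>			/* offsetof */

static size_t
__mp_bh_size(pagesize)
	size_t pagesize;
{
	return (offsetof(BH, buf) + pagesize);
}
#endif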

#include "mp_ext.h"