2 * See the file LICENSE for redistribution information.
4 * Copyright (c) 1996-2009 Oracle. All rights reserved.
12 #include "dbinc/db_page.h"
13 #include "dbinc/btree.h"
14 #include "dbinc/lock.h"
16 #include "dbinc/partition.h"
18 #ifdef HAVE_STATISTICS
/*
 * NOTE(review): partial extraction of Berkeley DB btree/bt_stat.c -- the
 * leading integer on each line is the original file's line number and many
 * intermediate lines are missing.  Code is left byte-identical; comments only.
 *
 * __bam_stat --
 *	Fill in a DB_BTREE_STAT allocated in user memory (__os_umalloc) and
 *	return it through spp.  On error the structure is released and *spp
 *	is set to NULL.  Caller frees the returned structure.
 */
21 * Gather/print the btree statistics
23 * PUBLIC: int __bam_stat __P((DBC *, void *, u_int32_t));
26 __bam_stat(dbc, spp, flags)
36 DB_LOCK lock, metalock;
41 int ret, t_ret, write_meta;
55 cp = (BTREE_CURSOR *)dbc->internal;
57 /* Allocate and clear the structure. */
58 if ((ret = __os_umalloc(env, sizeof(*sp), &sp)) != 0)
60 memset(sp, 0, sizeof(*sp));
62 /* Get the metadata page for the entire database. */
64 if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &metalock)) != 0)
66 if ((ret = __memp_fget(mpf, &pgno,
67 dbc->thread_info, dbc->txn, 0, &meta)) != 0)
/* NOTE(review): DB_FAST_STAT skips the free-list walk and the full-tree
 * traversal below and relies on cached meta-page counts instead. */
70 if (flags == DB_FAST_STAT)
73 /* Walk the metadata free list, counting pages. */
74 for (sp->bt_free = 0, pgno = meta->dbmeta.free; pgno != PGNO_INVALID;) {
77 if ((ret = __memp_fget(mpf, &pgno,
78 dbc->thread_info, dbc->txn, 0, &h)) != 0)
82 if ((ret = __memp_fput(mpf,
83 dbc->thread_info, h, dbc->priority)) != 0)
88 /* Get the root page. */
90 if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lock)) != 0)
92 if ((ret = __memp_fget(mpf, &pgno,
93 dbc->thread_info, dbc->txn, 0, &h)) != 0)
96 /* Get the levels from the root page. */
97 sp->bt_levels = h->level;
99 /* Discard the root page. */
100 ret = __memp_fput(mpf, dbc->thread_info, h, dbc->priority);
102 if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
/* NOTE(review): full stat -- walk every page in the tree, accumulating
 * per-page counts into sp via __bam_stat_callback. */
108 if ((ret = __bam_traverse(dbc,
109 DB_LOCK_READ, cp->root, __bam_stat_callback, sp)) != 0)
112 #ifdef HAVE_COMPRESSION
/* NOTE(review): compressed trees recount keys/data here; presumably the
 * on-page traversal undercounts compressed items -- the counting helper is
 * not visible in this chunk, confirm against __bam_compress_count. */
113 if (DB_IS_COMPRESSED(dbp) && (ret = __bam_compress_count(dbc,
114 &sp->bt_nkeys, &sp->bt_ndata)) != 0)
119 * Get the subdatabase metadata page if it's not the same as the
120 * one we already have.
/*
 * NOTE(review): write_meta means we will write freshly computed key/record
 * counts back to the meta page, which requires a writable handle and, for
 * MVCC databases, an enclosing transaction.
 */
122 write_meta = !F_ISSET(dbp, DB_AM_RDONLY) &&
123 (!MULTIVERSION(dbp) || dbc->txn != NULL);
125 if (t->bt_meta != PGNO_BASE_MD || write_meta) {
/* Swap the primary meta page/lock for the subdatabase's, upgrading the
 * lock to WRITE and dirtying the page when we intend to update it. */
126 ret = __memp_fput(mpf, dbc->thread_info, meta, dbc->priority);
128 if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
133 if ((ret = __db_lget(dbc,
134 0, t->bt_meta, write_meta ? DB_LOCK_WRITE : DB_LOCK_READ,
137 if ((ret = __memp_fget(mpf, &t->bt_meta,
138 dbc->thread_info, dbc->txn,
139 write_meta ? DB_MPOOL_DIRTY : 0, &meta)) != 0)
142 if (flags == DB_FAST_STAT) {
/* NOTE(review): recno trees (and btrees maintaining record numbers) can
 * read the exact key count from the root page's record count. */
143 if (dbp->type == DB_RECNO ||
144 (dbp->type == DB_BTREE && F_ISSET(dbp, DB_AM_RECNUM))) {
145 if ((ret = __db_lget(dbc, 0,
146 cp->root, DB_LOCK_READ, 0, &lock)) != 0)
148 if ((ret = __memp_fget(mpf, &cp->root,
149 dbc->thread_info, dbc->txn, 0, &h)) != 0)
152 sp->bt_nkeys = RE_NREC(h);
154 sp->bt_nkeys = meta->dbmeta.key_count;
156 sp->bt_ndata = dbp->type == DB_RECNO ?
157 sp->bt_nkeys : meta->dbmeta.record_count;
160 /* Get metadata page statistics. */
161 sp->bt_metaflags = meta->dbmeta.flags;
162 sp->bt_minkey = meta->minkey;
163 sp->bt_re_len = meta->re_len;
164 sp->bt_re_pad = meta->re_pad;
166 * Don't take the page number from the meta-data page -- that value is
167 * only maintained in the primary database, we may have been called on
168 * a subdatabase. (Yes, I read the primary database meta-data page
169 * earlier in this function, but I'm asking the underlying cache so the
170 * code for the Hash and Btree methods is the same.)
172 if ((ret = __memp_get_last_pgno(dbp->mpf, &pgno)) != 0)
174 sp->bt_pagecnt = pgno + 1;
175 sp->bt_pagesize = meta->dbmeta.pagesize;
176 sp->bt_magic = meta->dbmeta.magic;
177 sp->bt_version = meta->dbmeta.version;
/* Persist the freshly computed counts back to the meta page. */
179 if (write_meta != 0) {
180 meta->dbmeta.key_count = sp->bt_nkeys;
181 meta->dbmeta.record_count = sp->bt_ndata;
184 *(DB_BTREE_STAT **)spp = sp;
/* NOTE(review): common cleanup -- release page locks and mpool pins on all
 * paths; on error, free the stat structure and NULL the caller's pointer. */
186 err: /* Discard the second page. */
187 if ((t_ret = __LPUT(dbc, lock)) != 0 && ret == 0)
189 if (h != NULL && (t_ret = __memp_fput(mpf,
190 dbc->thread_info, h, dbc->priority)) != 0 && ret == 0)
193 /* Discard the metadata page. */
194 if ((t_ret = __LPUT(dbc, metalock)) != 0 && ret == 0)
196 if (meta != NULL && (t_ret = __memp_fput(mpf,
197 dbc->thread_info, meta, dbc->priority)) != 0 && ret == 0)
200 if (ret != 0 && sp != NULL) {
202 *(DB_BTREE_STAT **)spp = NULL;
209 * __bam_stat_print --
210 * Display btree/recno statistics.
212 * PUBLIC: int __bam_stat_print __P((DBC *, u_int32_t));
/*
 * NOTE(review): partial extraction -- leading integers are original line
 * numbers; interleaved lines are missing.  Code untouched, comments only.
 *
 * Gathers statistics via __bam_stat (or __partition_stat for partitioned
 * databases) and prints them one field per __db_msg/__db_dl call.
 */
215 __bam_stat_print(dbc, flags)
/* Flag-name table used to pretty-print the meta-page flag bits below. */
219 static const FN fn[] = {
220 { BTM_DUP, "duplicates" },
221 { BTM_RECNO, "recno" },
222 { BTM_RECNUM, "record-numbers" },
223 { BTM_FIXEDLEN, "fixed-length" },
224 { BTM_RENUMBER, "renumber" },
225 { BTM_SUBDB, "multiple-databases" },
226 { BTM_DUPSORT, "sorted duplicates" },
227 { BTM_COMPRESS, "compressed" },
238 #ifdef HAVE_PARTITION
/* Partitioned databases aggregate per-partition stats instead. */
239 if (DB_IS_PARTITIONED(dbp)) {
240 if ((ret = __partition_stat(dbc, &sp, flags)) != 0)
244 if ((ret = __bam_stat(dbc, &sp, LF_ISSET(DB_FAST_STAT))) != 0)
247 if (LF_ISSET(DB_STAT_ALL)) {
248 __db_msg(env, "%s", DB_GLOBAL(db_line));
249 __db_msg(env, "Default Btree/Recno database information:");
252 __db_msg(env, "%lx\tBtree magic number", (u_long)sp->bt_magic);
253 __db_msg(env, "%lu\tBtree version number", (u_long)sp->bt_version);
255 (void)__db_get_lorder(dbp, &lorder);
264 s = "Unrecognized byte order";
267 __db_msg(env, "%s\tByte order", s);
268 __db_prflags(env, NULL, sp->bt_metaflags, fn, NULL, "\tFlags");
/* Minimum-keys is btree-only; record size/pad are recno-only fields. */
269 if (dbp->type == DB_BTREE)
270 __db_dl(env, "Minimum keys per-page", (u_long)sp->bt_minkey);
271 if (dbp->type == DB_RECNO) {
273 "Fixed-length record size", (u_long)sp->bt_re_len);
275 "%#x\tFixed-length record pad", (u_int)sp->bt_re_pad);
278 "Underlying database page size", (u_long)sp->bt_pagesize);
279 if (dbp->type == DB_BTREE)
280 __db_dl(env, "Overflow key/data size",
281 ((BTREE_CURSOR *)dbc->internal)->ovflsize);
282 __db_dl(env, "Number of levels in the tree", (u_long)sp->bt_levels);
283 __db_dl(env, dbp->type == DB_BTREE ?
284 "Number of unique keys in the tree" :
285 "Number of records in the tree", (u_long)sp->bt_nkeys);
287 "Number of data items in the tree", (u_long)sp->bt_ndata);
/* Page-class counts with percent-free derived via DB_PCT_PG. */
290 "Number of tree internal pages", (u_long)sp->bt_int_pg);
292 "Number of bytes free in tree internal pages",
293 (u_long)sp->bt_int_pgfree,
294 DB_PCT_PG(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize), "ff");
297 "Number of tree leaf pages", (u_long)sp->bt_leaf_pg);
298 __db_dl_pct(env, "Number of bytes free in tree leaf pages",
299 (u_long)sp->bt_leaf_pgfree, DB_PCT_PG(
300 sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize), "ff");
303 "Number of tree duplicate pages", (u_long)sp->bt_dup_pg);
305 "Number of bytes free in tree duplicate pages",
306 (u_long)sp->bt_dup_pgfree,
307 DB_PCT_PG(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize), "ff");
310 "Number of tree overflow pages", (u_long)sp->bt_over_pg);
311 __db_dl_pct(env, "Number of bytes free in tree overflow pages",
312 (u_long)sp->bt_over_pgfree, DB_PCT_PG(
313 sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize), "ff");
314 __db_dl(env, "Number of empty pages", (u_long)sp->bt_empty_pg);
316 __db_dl(env, "Number of pages on the free list", (u_long)sp->bt_free);
324 * __bam_stat_callback --
325 * Statistics callback.
327 * PUBLIC: int __bam_stat_callback __P((DBC *, PAGE *, void *, int *));
/*
 * NOTE(review): partial extraction -- leading integers are original line
 * numbers; the switch-on-page-type skeleton and several arms are elided.
 * Called once per page by __bam_traverse; accumulates page counts, free
 * space, and key/data counts into the DB_BTREE_STAT passed as cookie.
 */
330 __bam_stat_callback(dbc, h, cookie, putp)
338 db_indx_t indx, *inp, top;
/* Internal pages: count free space. */
351 sp->bt_int_pgfree += P_FREESPACE(dbp, h);
357 /* Correct for on-page duplicates and deleted items. */
358 for (indx = 0; indx < top; indx += P_INDX) {
359 type = GET_BKEYDATA(dbp, h, indx + O_INDX)->type;
360 /* Ignore deleted items. */
364 /* Ignore duplicate keys. */
365 if (indx + P_INDX >= top ||
366 inp[indx] != inp[indx + P_INDX])
369 /* Ignore off-page duplicates. */
370 if (B_TYPE(type) != B_DUPLICATE)
375 sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
382 * If walking a recno tree, then each of these items is a key.
383 * Otherwise, we're walking an off-page duplicate set.
385 if (dbp->type == DB_RECNO) {
387 * Correct for deleted items in non-renumbering Recno
390 if (F_ISSET(dbp, DB_AM_RENUMBER)) {
394 for (indx = 0; indx < top; indx += O_INDX) {
395 type = GET_BKEYDATA(dbp, h, indx)->type;
396 if (!B_DISSET(type)) {
403 sp->bt_leaf_pgfree += P_FREESPACE(dbp, h);
/* Off-page duplicate pages: count free space and live items. */
408 sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
415 /* Correct for deleted items. */
416 for (indx = 0; indx < top; indx += O_INDX)
417 if (!B_DISSET(GET_BKEYDATA(dbp, h, indx)->type))
421 sp->bt_dup_pgfree += P_FREESPACE(dbp, h);
/* Overflow pages use the overflow-specific free-space macro. */
425 sp->bt_over_pgfree += P_OVFLSPACE(dbp, dbp->pgsize, h);
/* Unknown page type: report a page-format error. */
428 return (__db_pgfmt(dbp->env, h->pgno));
434 * __bam_print_cursor --
435 * Display the current internal cursor.
437 * PUBLIC: void __bam_print_cursor __P((DBC *));
/*
 * NOTE(review): partial extraction -- leading integers are original line
 * numbers.  Dumps the BTREE_CURSOR's debug-visible state: overflow size,
 * recno (recno cursors only), order, and the internal flag bits.
 */
440 __bam_print_cursor(dbc)
/* Flag-name table for pretty-printing the cursor's internal flags. */
443 static const FN fn[] = {
444 { C_DELETED, "C_DELETED" },
445 { C_RECNUM, "C_RECNUM" },
446 { C_RENUMBER, "C_RENUMBER" },
453 cp = (BTREE_CURSOR *)dbc->internal;
455 STAT_ULONG("Overflow size", cp->ovflsize);
456 if (dbc->dbtype == DB_RECNO)
457 STAT_ULONG("Recno", cp->recno);
458 STAT_ULONG("Order", cp->order);
459 __db_prflags(env, NULL, cp->flags, fn, NULL, "\tInternal Flags");
462 #else /* !HAVE_STATISTICS */
/* NOTE(review): !HAVE_STATISTICS stub -- quiet unused args and fail with
 * the standard "statistics not built" error. */
465 __bam_stat(dbc, spp, flags)
470 COMPQUIET(spp, NULL);
473 return (__db_stat_not_built(dbc->env));
/* NOTE(review): !HAVE_STATISTICS stub for stat_print -- same error path. */
477 __bam_stat_print(dbc, flags)
483 return (__db_stat_not_built(dbc->env));
490 * Return proportion of keys relative to given key. The numbers are
491 * slightly skewed due to on page duplicates.
493 * PUBLIC: int __bam_key_range __P((DBC *, DBT *, DB_KEY_RANGE *, u_int32_t));
/*
 * NOTE(review): partial extraction -- leading integers are original line
 * numbers; interleaved lines are missing.  Searches for dbt, then walks the
 * saved search stack, accumulating fractional less/greater estimates from
 * each level's index position; `factor' shrinks by 1/entries per level.
 */
496 __bam_key_range(dbc, dbt, kp, flags)
509 if ((ret = __bam_search(dbc, PGNO_INVALID,
510 dbt, SR_STK_ONLY, 1, NULL, &exact)) != 0)
513 cp = (BTREE_CURSOR *)dbc->internal;
514 kp->less = kp->greater = 0.0;
518 /* Correct the leaf page. */
/* NOTE(review): halving entries presumably compensates for leaf pages
 * storing key/data pairs (two indices per item) -- confirm against the
 * on-page P_INDX layout. */
519 cp->csp->entries /= 2;
521 for (sp = cp->sp; sp <= cp->csp; ++sp) {
523 * At each level we know that pages greater than indx contain
524 * keys greater than what we are looking for and those less
525 * than indx are less than. The one pointed to by indx may
526 * have some less, some greater or even equal. If indx is
527 * equal to the number of entries, then the key is out of range
528 * and everything is less.
531 kp->greater += factor * (sp->entries - 1)/sp->entries;
532 else if (sp->indx == sp->entries)
535 kp->less += factor * sp->indx / sp->entries;
536 kp->greater += factor *
537 ((sp->entries - sp->indx) - 1) / sp->entries;
/* Each level down, one entry's share of the total shrinks by 1/entries. */
539 factor *= 1.0/sp->entries;
543 * If there was an exact match then assign 1 n'th to the key itself.
544 * Otherwise that factor belongs to those greater than the key, unless
545 * the key was out of range.
551 kp->greater += factor;
563 * Walk a Btree database.
565 * PUBLIC: int __bam_traverse __P((DBC *, db_lockmode_t,
566 * PUBLIC: db_pgno_t, int (*)(DBC *, PAGE *, void *, int *), void *));
/*
 * NOTE(review): partial extraction -- leading integers are original line
 * numbers; interleaved lines are missing and the function's tail (after
 * the final __TLPUT, original line 665) is cut off at the chunk boundary.
 * Recursively descends from root_pgno, invoking callback on each page;
 * callback may set *putp to claim responsibility for releasing the page.
 */
569 __bam_traverse(dbc, mode, root_pgno, callback, cookie)
573 int (*callback)__P((DBC *, PAGE *, void *, int *));
583 db_indx_t indx, *inp;
584 int already_put, ret, t_ret;
590 if ((ret = __db_lget(dbc, 0, root_pgno, mode, 0, &lock)) != 0)
592 if ((ret = __memp_fget(mpf, &root_pgno,
593 dbc->thread_info, dbc->txn, 0, &h)) != 0) {
/* Page fetch failed: drop the just-acquired lock before returning. */
594 (void)__TLPUT(dbc, lock);
/* Btree internal page: recurse into overflow chains and child pages. */
600 for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
601 bi = GET_BINTERNAL(dbp, h, indx);
602 if (B_TYPE(bi->type) == B_OVERFLOW &&
603 (ret = __db_traverse_big(dbc,
604 ((BOVERFLOW *)bi->data)->pgno,
605 callback, cookie)) != 0)
607 if ((ret = __bam_traverse(
608 dbc, mode, bi->pgno, callback, cookie)) != 0)
/* Recno internal page: recurse into each child. */
613 for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
614 ri = GET_RINTERNAL(dbp, h, indx);
615 if ((ret = __bam_traverse(
616 dbc, mode, ri->pgno, callback, cookie)) != 0)
/* Btree leaf page: key/data pairs, stepping by P_INDX. */
622 for (indx = 0; indx < NUM_ENT(h); indx += P_INDX) {
623 bk = GET_BKEYDATA(dbp, h, indx);
/* NOTE(review): the indx/indx+P_INDX inp comparison skips repeated
 * keys of on-page duplicates so each overflow key is visited once. */
624 if (B_TYPE(bk->type) == B_OVERFLOW &&
625 (indx + P_INDX >= NUM_ENT(h) ||
626 inp[indx] != inp[indx + P_INDX])) {
627 if ((ret = __db_traverse_big(dbc,
628 GET_BOVERFLOW(dbp, h, indx)->pgno,
629 callback, cookie)) != 0)
/* The data item (indx + O_INDX) may be an off-page duplicate tree
 * or an overflow chain; recurse accordingly. */
632 bk = GET_BKEYDATA(dbp, h, indx + O_INDX);
633 if (B_TYPE(bk->type) == B_DUPLICATE &&
634 (ret = __bam_traverse(dbc, mode,
635 GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
636 callback, cookie)) != 0)
638 if (B_TYPE(bk->type) == B_OVERFLOW &&
639 (ret = __db_traverse_big(dbc,
640 GET_BOVERFLOW(dbp, h, indx + O_INDX)->pgno,
641 callback, cookie)) != 0)
/* Duplicate/recno leaf page: single items, stepping by O_INDX. */
647 for (indx = 0; indx < NUM_ENT(h); indx += O_INDX) {
648 bk = GET_BKEYDATA(dbp, h, indx);
649 if (B_TYPE(bk->type) == B_OVERFLOW &&
650 (ret = __db_traverse_big(dbc,
651 GET_BOVERFLOW(dbp, h, indx)->pgno,
652 callback, cookie)) != 0)
/* Unknown page type: report a page-format error. */
657 return (__db_pgfmt(dbp->env, h->pgno));
660 ret = callback(dbc, h, cookie, &already_put);
/* Release the page pin (unless the callback already did) and the lock. */
662 err: if (!already_put && (t_ret = __memp_fput(mpf,
663 dbc->thread_info, h, dbc->priority)) != 0 && ret == 0)
665 if ((t_ret = __TLPUT(dbc, lock)) != 0 && ret == 0)