2 * See the file LICENSE for redistribution information.
4 * Copyright (c) 1999, 2000
5 * Sleepycat Software. All rights reserved.
7 * $Id: hash_verify.c,v 1.31 2000/11/30 00:58:37 ubell Exp $
10 #include "db_config.h"
13 static const char revid[] = "$Id: hash_verify.c,v 1.31 2000/11/30 00:58:37 ubell Exp $";
16 #ifndef NO_SYSTEM_INCLUDES
17 #include <sys/types.h>
24 #include "db_verify.h"
28 static int __ham_dups_unsorted __P((DB *, u_int8_t *, u_int32_t));
29 static int __ham_vrfy_bucket __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
31 static int __ham_vrfy_item __P((DB *,
32 VRFY_DBINFO *, db_pgno_t, PAGE *, u_int32_t, u_int32_t));
36 * Verify the hash-specific part of a metadata page.
38 * Note that unlike btree, we don't save things off, because we
39 * will need most everything again to verify each page and the
40 * amount of state here is significant.
42 * PUBLIC: int __ham_vrfy_meta __P((DB *, VRFY_DBINFO *, HMETA *,
43 * PUBLIC: db_pgno_t, u_int32_t));
46 __ham_vrfy_meta(dbp, vdp, m, pgno, flags)
/*
 * NOTE(review): this view of the file is truncated -- declarations and
 * statements between the numbered lines are missing.  Comments below
 * describe only what the visible code establishes.
 *
 * Verifies the hash-specific fields of the meta page `m` at `pgno`:
 * the custom-hash check key, max_bucket, high/low masks, ffactor,
 * nelem, the dup flags, and the spares array.  Page info is fetched
 * from and released back to `vdp`; returns 0, DB_VERIFY_BAD, or an
 * error from a helper.
 */
55	int i, ret, t_ret, isbad;
56	u_int32_t pwr, mbucket;
57	u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
59	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
	/* Use the application's custom hash function if one was supplied. */
63	hashp = dbp->h_internal;
65	if (hashp != NULL && hashp->h_hash != NULL)
66	hfunc = hashp->h_hash;
71	* If we haven't already checked the common fields in pagezero,
74	if (!F_ISSET(pip, VRFY_INCOMPLETE) &&
75	    (ret = __db_vrfy_meta(dbp, vdp, &m->dbmeta, pgno, flags)) != 0) {
76	if (ret == DB_VERIFY_BAD)
	/*
	 * h_charkey is the stored hash of CHARKEY; if hashing CHARKEY with
	 * the current function doesn't reproduce it, the wrong hash
	 * function is in use, and ordering checks would be meaningless.
	 */
83	if (!LF_ISSET(DB_NOORDERCHK))
84	if (m->h_charkey != hfunc(dbp, CHARKEY, sizeof(CHARKEY))) {
86	"Database has different custom hash function; reverify with DB_NOORDERCHK set"
89	* Return immediately; this is probably a sign
90	* of user error rather than database corruption, so
91	* we want to avoid extraneous errors.
97	/* max_bucket must be less than the last pgno. */
98	if (m->max_bucket > vdp->last_pgno) {
100	"Impossible max_bucket %lu on meta page %lu",
101	m->max_bucket, pgno));
103	* Most other fields depend somehow on max_bucket, so
104	* we just return--there will be lots of extraneous
112	* max_bucket, high_mask and low_mask: high_mask must be one
113	* less than the next power of two above max_bucket, and
114	* low_mask must be one less than the power of two below it.
118	pwr = (m->max_bucket == 0) ? 1 : 1 << __db_log2(m->max_bucket + 1);
119	if (m->high_mask != pwr - 1) {
121	"Incorrect high_mask %lu on page %lu, should be %lu",
122	m->high_mask, pgno, pwr - 1));
	/*
	 * NOTE(review): `pwr` is presumably halved between the high_mask
	 * and low_mask checks in the elided lines -- confirm against the
	 * full source; as shown, both compare against the same pwr - 1.
	 */
126	if (m->low_mask != pwr - 1) {
128	"Incorrect low_mask %lu on page %lu, should be %lu",
129	m->low_mask, pgno, pwr - 1));
133	/* ffactor: no check possible. */
134	pip->h_ffactor = m->ffactor;
137	* nelem: just make sure it's not astronomical for now. This is the
138	* same check that hash_upgrade does, since there was a bug in 2.X
139	* which could make nelem go "negative".
141	if (m->nelem > 0x80000000) {
143	"Suspiciously high nelem of %lu on page %lu",
148	pip->h_nelem = m->nelem;
	/* Propagate duplicate flags from the meta page to the page info. */
151	if (F_ISSET(&m->dbmeta, DB_HASH_DUP))
152	F_SET(pip, VRFY_HAS_DUPS);
153	if (F_ISSET(&m->dbmeta, DB_HASH_DUPSORT))
154	F_SET(pip, VRFY_HAS_DUPSORT);
155	/* XXX: Why is the DB_HASH_SUBDB flag necessary? */
	/*
	 * NOTE(review): the condition reads m->spares[i] BEFORE checking
	 * i < NCACHED, so the final iteration dereferences one slot past
	 * the array before the bound terminates the loop.  The tests
	 * should be swapped -- confirm against upstream.
	 */
158	for (i = 0; m->spares[i] != 0 && i < NCACHED; i++) {
160	* We set mbucket to the maximum bucket that would use a given
161	* spares entry; we want to ensure that it's always less
164	mbucket = (1 << i) - 1;
165	if (BS_TO_PAGE(mbucket, m->spares) > vdp->last_pgno) {
167	"Spares array entry %lu, page %lu is invalid",
	/* Release the page info; preserve the first error encountered. */
173	err:	if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
175	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
182 * PUBLIC: int __ham_vrfy __P((DB *, VRFY_DBINFO *, PAGE *, db_pgno_t,
183 * PUBLIC: u_int32_t));
186 __ham_vrfy(dbp, vdp, h, pgno, flags)
/*
 * NOTE(review): truncated view -- intermediate lines are missing.
 *
 * Verifies a single hash data page `h` at `pgno`: checks flags and
 * page type, the PAGE-common fields, then walks the inp[] offset
 * array validating monotonicity and calling __ham_vrfy_item on each
 * entry.  Returns 0, DB_VERIFY_BAD, or a helper's error.
 */
194	u_int32_t ent, himark, inpend;
195	int isbad, ret, t_ret;
198	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
201	/* Sanity check our flags and page type. */
202	if ((ret = __db_fchk(dbp->dbenv, "__ham_vrfy",
203	    flags, DB_AGGRESSIVE | DB_NOORDERCHK | DB_SALVAGE)) != 0)
206	if (TYPE(h) != P_HASH) {
207	TYPE_ERR_PRINT(dbp->dbenv, "__ham_vrfy", pgno, TYPE(h));
213	/* Verify and save off fields common to all PAGEs. */
214	if ((ret = __db_vrfy_datapage(dbp, vdp, h, pgno, flags)) != 0) {
215	if (ret == DB_VERIFY_BAD)
222	* Verify inp[]. Each offset from 0 to NUM_ENT(h) must be lower
223	* than the previous one, higher than the current end of the inp array,
224	* and lower than the page size.
226	* In any case, we return immediately if things are bad, as it would
227	* be unsafe to proceed.
	/*
	 * himark starts at the page size and ratchets downward; inpend is
	 * the byte offset of the end of the growing inp[] array.  Offsets
	 * must stay strictly between the two or the page is corrupt.
	 */
229	for (ent = 0, himark = dbp->pgsize,
230	    inpend = (u_int8_t *)h->inp - (u_int8_t *)h;
231	    ent < NUM_ENT(h); ent++)
232	if (h->inp[ent] >= himark) {
234	"Item %lu on page %lu out of order or nonsensical",
238	} else if (inpend >= himark) {
240	"inp array collided with data on page %lu",
246	himark = h->inp[ent];
247	inpend += sizeof(db_indx_t);
248	if ((ret = __ham_vrfy_item(
249	    dbp, vdp, pgno, h, ent, flags)) != 0)
	/* Release the page info; preserve the first error encountered. */
253	err:	if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
255	return (ret == 0 && isbad == 1 ? DB_VERIFY_BAD : ret);
260 * Given a hash page and an offset, sanity-check the item itself,
261 * and save off any overflow items or off-page dup children as necessary.
264 __ham_vrfy_item(dbp, vdp, pgno, h, i, flags)
/*
 * NOTE(review): truncated view -- intermediate lines are missing.
 *
 * Sanity-checks item `i` on hash page `h`, dispatching on the item's
 * HPAGE type: on-page dup sets are length-walked, offpage (overflow)
 * and offpage-dup items get their child pgno validated and recorded
 * in the child database for later structure checks.
 */
273	VRFY_CHILDINFO child;
275	db_indx_t offset, len, dlen, elen;
279	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
282	switch (HPAGE_TYPE(h, i)) {
284	/* Nothing to do here--everything but the type field is data */
287	/* Are we a datum or a key? Better be the former. */
290	"Hash key stored as duplicate at page %lu item %lu",
294	* Dups are encoded as a series within a single HKEYDATA,
295	* in which each dup is surrounded by a copy of its length
296	* on either side (so that the series can be walked in either
297	* direction. We loop through this series and make sure
298	* each dup is reasonable.
300	* Note that at this point, we've verified item i-1, so
301	* it's safe to use LEN_HKEYDATA (which looks at inp[i-1]).
303	len = LEN_HKEYDATA(h, dbp->pgsize, i);
304	databuf = HKEYDATA_DATA(P_ENTRY(h, i));
305	for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
	/* memcpy, not a cast: the on-page length may be misaligned. */
306	memcpy(&dlen, databuf + offset, sizeof(db_indx_t));
308	/* Make sure the length is plausible. */
309	if (offset + DUP_SIZE(dlen) > len) {
311	"Duplicate item %lu, page %lu has bad length",
318	* Make sure the second copy of the length is the
322	databuf + offset + dlen + sizeof(db_indx_t),
326	"Duplicate item %lu, page %lu has two different lengths",
332	F_SET(pip, VRFY_HAS_DUPS);
	/* Ordering check is optional; flag unsorted dups for later. */
333	if (!LF_ISSET(DB_NOORDERCHK) &&
334	    __ham_dups_unsorted(dbp, databuf, len))
335	F_SET(pip, VRFY_DUPS_UNSORTED);
338	/* Offpage item. Make sure pgno is sane, save off. */
339	memcpy(&hop, P_ENTRY(h, i), HOFFPAGE_SIZE);
340	if (!IS_VALID_PGNO(hop.pgno) || hop.pgno == pip->pgno ||
341	    hop.pgno == PGNO_INVALID) {
343	"Offpage item %lu, page %lu has bad page number",
	/* Record the overflow child; tlen is verified later. */
348	memset(&child, 0, sizeof(VRFY_CHILDINFO));
349	child.pgno = hop.pgno;
350	child.type = V_OVERFLOW;
351	child.tlen = hop.tlen; /* This will get checked later. */
352	if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
356	/* Offpage duplicate item. Same drill. */
357	memcpy(&hod, P_ENTRY(h, i), HOFFDUP_SIZE);
358	if (!IS_VALID_PGNO(hod.pgno) || hod.pgno == pip->pgno ||
359	    hod.pgno == PGNO_INVALID) {
361	"Offpage item %lu, page %lu has bad page number",
366	memset(&child, 0, sizeof(VRFY_CHILDINFO));
367	child.pgno = hod.pgno;
368	child.type = V_DUPLICATE;
369	if ((ret = __db_vrfy_childput(vdp, pip->pgno, &child)) != 0)
371	F_SET(pip, VRFY_HAS_DUPS);
	/*
	 * NOTE(review): "%i" with what is presumably a u_int32_t `i` --
	 * format/argument mismatch; other messages here use %lu with a
	 * cast.  Confirm against upstream.
	 */
375	"Item %i, page %lu has bad type", i, pip->pgno));
380	err:	if ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0 && ret == 0)
386 * __ham_vrfy_structure --
387 * Verify the structure of a hash database.
389 * PUBLIC: int __ham_vrfy_structure __P((DB *, VRFY_DBINFO *, db_pgno_t,
390 * PUBLIC: u_int32_t));
393 __ham_vrfy_structure(dbp, vdp, meta_pgno, flags)
/*
 * NOTE(review): truncated view -- intermediate lines are missing.
 *
 * Verifies the overall structure of the hash database rooted at
 * `meta_pgno`: marks the meta page as seen, verifies each bucket
 * chain via __ham_vrfy_bucket, then checks that any allocated-but-
 * unused pages above max_bucket (per the spares array) are empty
 * hash pages.
 */
403	int isbad, p, ret, t_ret;
	/* The meta page must not already be claimed by another subtree. */
411	if ((ret = __db_vrfy_pgset_get(pgset, meta_pgno, &p)) != 0)
415	"Hash meta page %lu referenced twice", meta_pgno));
416	return (DB_VERIFY_BAD);
418	if ((ret = __db_vrfy_pgset_inc(pgset, meta_pgno)) != 0)
421	/* Get the meta page; we'll need it frequently. */
422	if ((ret = memp_fget(dbp->mpf, &meta_pgno, 0, &m)) != 0)
425	/* Loop through bucket by bucket. */
426	for (bucket = 0; bucket <= m->max_bucket; bucket++)
428	    __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)) != 0) {
429	if (ret == DB_VERIFY_BAD)
436	* There may be unused hash pages corresponding to buckets
437	* that have been allocated but not yet used. These may be
438	* part of the current doubling above max_bucket, or they may
439	* correspond to buckets that were used in a transaction
442	* Loop through them, as far as the spares array defines them,
443	* and make sure they're all empty.
445	* Note that this should be safe, since we've already verified
446	* that the spares array is sane.
448	for (bucket = m->max_bucket + 1;
449	    m->spares[__db_log2(bucket + 1)] != 0; bucket++) {
450	pgno = BS_TO_PAGE(bucket, m->spares);
451	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
454	/* It's okay if these pages are totally zeroed; unmark it. */
455	F_CLR(pip, VRFY_IS_ALLZEROES);
457	if (pip->type != P_HASH) {
459	"Hash bucket %lu maps to non-hash page %lu",
462	} else if (pip->entries != 0) {
464	"Non-empty page %lu in unused hash bucket %lu",
	/* An unused page claimed elsewhere is a structural error. */
468	if ((ret = __db_vrfy_pgset_get(pgset, pgno, &p)) != 0)
472	"Hash page %lu above max_bucket referenced",
477	    __db_vrfy_pgset_inc(pgset, pgno)) != 0)
480	    __db_vrfy_putpageinfo(vdp, pip)) != 0)
486	/* If we got here, it's an error. */
487	(void)__db_vrfy_putpageinfo(vdp, pip);
	/* Put back the meta page (and data page, if held) on all paths. */
491	err:	if ((t_ret = memp_fput(dbp->mpf, m, 0)) != 0)
493	if (h != NULL && (t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
495	return ((isbad == 1 && ret == 0) ? DB_VERIFY_BAD: ret);
499 * __ham_vrfy_bucket --
500 * Verify a given bucket.
503 __ham_vrfy_bucket(dbp, vdp, m, bucket, flags)
/*
 * NOTE(review): truncated view -- intermediate lines are missing.
 *
 * Verifies one hash bucket: walks the chain of hash pages starting
 * at BS_TO_PAGE(bucket, m->spares), checking each page's membership,
 * dup flags, overflow/offpage-dup children, chain linkage
 * (next_pgno/prev_pgno), and -- unless DB_NOORDERCHK -- that items
 * hash into this bucket.
 */
507	u_int32_t bucket, flags;
510	VRFY_CHILDINFO *child;
511	VRFY_PAGEINFO *mip, *pip;
512	int ret, t_ret, isbad, p;
513	db_pgno_t pgno, next_pgno;
515	u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
	/* Use the application's custom hash function if one was supplied. */
521	hashp = dbp->h_internal;
522	if (hashp != NULL && hashp->h_hash != NULL)
523	hfunc = hashp->h_hash;
527	if ((ret = __db_vrfy_getpageinfo(vdp, PGNO(m), &mip)) != 0)
530	/* Calculate the first pgno for this bucket. */
531	pgno = BS_TO_PAGE(bucket, m->spares);
533	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
536	/* Make sure we got a plausible page number. */
537	if (pgno > vdp->last_pgno || pip->type != P_HASH) {
538	EPRINT((dbp->dbenv, "Bucket %lu has impossible first page %lu",
540	/* Unsafe to continue. */
545	if (pip->prev_pgno != PGNO_INVALID) {
	/*
	 * NOTE(review): the format has two %lu conversions but only one
	 * argument (`pgno`) is visible -- the bucket argument appears to
	 * be missing, which is undefined behavior in printf-family
	 * calls.  Confirm against upstream.
	 */
547	"First hash page %lu in bucket %lu has a prev_pgno", pgno));
552	* Set flags for dups and sorted dups.
554	flags |= F_ISSET(mip, VRFY_HAS_DUPS) ? ST_DUPOK : 0;
555	flags |= F_ISSET(mip, VRFY_HAS_DUPSORT) ? ST_DUPSORT : 0;
557	/* Loop until we find a fatal bug, or until we run out of pages. */
559	/* Provide feedback on our progress to the application. */
560	if (!LF_ISSET(DB_SALVAGE))
561	__db_vrfy_struct_feedback(dbp, vdp);
	/* Each page may belong to exactly one bucket chain. */
563	if ((ret = __db_vrfy_pgset_get(vdp->pgset, pgno, &p)) != 0)
567	"Hash page %lu referenced twice", pgno));
569	/* Unsafe to continue. */
571	} else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, pgno)) != 0)
575	* Hash pages that nothing has ever hashed to may never
576	* have actually come into existence, and may appear to be
577	* entirely zeroed. This is acceptable, and since there's
578	* no real way for us to know whether this has actually
579	* occurred, we clear the "wholly zeroed" flag on every
580	* hash page. A wholly zeroed page, by nature, will appear
581	* to have no flags set and zero entries, so should
582	* otherwise verify correctly.
584	F_CLR(pip, VRFY_IS_ALLZEROES);
586	/* If we have dups, our meta page had better know about it. */
587	if (F_ISSET(pip, VRFY_HAS_DUPS)
588	    && !F_ISSET(mip, VRFY_HAS_DUPS)) {
590	"Duplicates present in non-duplicate database, page %lu",
596	* If the database has sorted dups, this page had better
597	* not have unsorted ones.
599	if (F_ISSET(mip, VRFY_HAS_DUPSORT) &&
600	    F_ISSET(pip, VRFY_DUPS_UNSORTED)) {
602	"Unsorted dups in sorted-dup database, page %lu",
607	/* Walk overflow chains and offpage dup trees. */
608	if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
610	for (ret = __db_vrfy_ccset(cc, pip->pgno, &child); ret == 0;
611	    ret = __db_vrfy_ccnext(cc, &child))
612	if (child->type == V_OVERFLOW) {
613	if ((ret = __db_vrfy_ovfl_structure(dbp, vdp,
614	    child->pgno, child->tlen, flags)) != 0) {
615	if (ret == DB_VERIFY_BAD)
620	} else if (child->type == V_DUPLICATE) {
621	if ((ret = __db_vrfy_duptype(dbp,
622	    vdp, child->pgno, flags)) != 0) {
	/* Offpage dup trees are verified with the btree subtree code. */
626	if ((ret = __bam_vrfy_subtree(dbp, vdp,
627	    child->pgno, NULL, NULL,
628	    flags | ST_RECNUM | ST_DUPSET, NULL,
630	if (ret == DB_VERIFY_BAD)
636	if ((ret = __db_vrfy_ccclose(cc)) != 0)
640	/* If it's safe to check that things hash properly, do so. */
641	if (isbad == 0 && !LF_ISSET(DB_NOORDERCHK) &&
642	    (ret = __ham_vrfy_hashing(dbp, pip->entries,
643	    m, bucket, pgno, flags, hfunc)) != 0) {
644	if (ret == DB_VERIFY_BAD)
	/* Advance to the next page in the bucket chain. */
650	next_pgno = pip->next_pgno;
651	ret = __db_vrfy_putpageinfo(vdp, pip);
657	if (next_pgno == PGNO_INVALID)
658	break;	/* End of the bucket. */
660	/* We already checked this, but just in case... */
661	if (!IS_VALID_PGNO(next_pgno)) {
664	"Hash page %lu has bad next_pgno", pgno));
669	if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
	/* The forward link must be matched by a correct back link. */
672	if (pip->prev_pgno != pgno) {
673	EPRINT((dbp->dbenv, "Hash page %lu has bad prev_pgno",
	/* Clean up cursor and page infos; preserve the first error. */
680	err:	if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
682	if (mip != NULL && ((t_ret = __db_vrfy_putpageinfo(vdp, mip)) != 0) &&
685	if (pip != NULL && ((t_ret = __db_vrfy_putpageinfo(vdp, pip)) != 0) &&
688	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
692 * __ham_vrfy_hashing --
693 * Verify that all items on a given hash page hash correctly.
695 * PUBLIC: int __ham_vrfy_hashing __P((DB *,
696 * PUBLIC: u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t,
697 * PUBLIC: u_int32_t (*) __P((DB *, const void *, u_int32_t))));
700 __ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
/*
 * NOTE(review): truncated view -- intermediate lines are missing.
 *
 * Checks that every key on hash page `pgno` (keys sit at even item
 * indices; data at odd, hence the i += 2) hashes to `thisbucket`
 * under `hfunc` and the meta page's masks.  Returns 0,
 * DB_VERIFY_BAD, or a helper's error.
 */
704	u_int32_t thisbucket;
707	u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
712	int ret, t_ret, isbad;
713	u_int32_t hval, bucket;
	/* DB_DBT_REALLOC: __db_ret manages (and we later free) dbt.data. */
716	memset(&dbt, 0, sizeof(DBT));
717	F_SET(&dbt, DB_DBT_REALLOC);
719	if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
722	for (i = 0; i < nentries; i += 2) {
724	* We've already verified the page integrity and that of any
725	* overflow chains linked off it; it is therefore safe to use
726	* __db_ret. It's also not all that much slower, since we have
727	* to copy every hash item to deal with alignment anyway; we
728	* can tweak this a bit if this proves to be a bottleneck,
729	* but for now, take the easy route.
731	if ((ret = __db_ret(dbp, h, i, &dbt, NULL, NULL)) != 0)
733	hval = hfunc(dbp, dbt.data, dbt.size);
	/*
	 * Standard linear-hashing bucket computation: mask with
	 * high_mask, and fall back to low_mask for buckets not yet
	 * split into existence.
	 */
735	bucket = hval & m->high_mask;
736	if (bucket > m->max_bucket)
737	bucket = bucket & m->low_mask;
739	if (bucket != thisbucket) {
741	"Item %lu on page %lu hashes incorrectly",
	/* Free the retrieval buffer and put back the page. */
747	err:	if (dbt.data != NULL)
748	__os_free(dbt.data, 0);
749	if ((t_ret = memp_fput(dbp->mpf, h, 0)) != 0)
752	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
757 * Safely dump out anything that looks like a key on an alleged
760 * PUBLIC: int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
761 * PUBLIC: void *, int (*)(void *, const void *), u_int32_t));
764 __ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
/*
 * NOTE(review): truncated view -- intermediate lines are missing.
 *
 * Best-effort salvage of key/data items from an alleged hash page:
 * walks inp[] (past NUM_ENT only under DB_AGGRESSIVE), printing
 * each item via the callback, following overflow pages and offpage
 * dup trees, and emitting "UNKNOWN" placeholders where data is
 * unrecoverable.  Errors are accumulated in err_ret rather than
 * aborting the walk.
 */
770	int (*callback) __P((void *, const void *));
775	int ret, err_ret, t_ret;
776	u_int32_t himark, tlen;
779	u_int32_t dlen, len, i;
781	memset(&dbt, 0, sizeof(DBT));
782	dbt.flags = DB_DBT_REALLOC;
	/* Placeholder DBT printed when an item can't be recovered. */
784	memset(&unkdbt, 0, sizeof(DBT));
785	unkdbt.size = strlen("UNKNOWN") + 1;
786	unkdbt.data = "UNKNOWN";
791	* Allocate a buffer for overflow items. Start at one page;
792	* __db_safe_goff will realloc as needed.
794	if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, NULL, &buf)) != 0)
797	himark = dbp->pgsize;
799	/* If we're not aggressive, break when we hit NUM_ENT(h). */
800	if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
803	/* Verify the current item. */
804	ret = __db_vrfy_inpitem(dbp,
805	    h, pgno, i, 0, flags, &himark, NULL);
806	/* If this returned a fatality, it's time to break. */
807	if (ret == DB_VERIFY_FATAL)
	/* Clamp obviously-bogus lengths so we never read off the page. */
812	len = LEN_HKEYDATA(h, dbp->pgsize, i);
813	if ((u_int32_t)(hk + len - (u_int8_t *)h) >
816	* Item is unsafely large; either continue
817	* or set it to the whole page, depending on
820	if (!LF_ISSET(DB_AGGRESSIVE))
823	    (u_int32_t)(hk - (u_int8_t *)h);
824	err_ret = DB_VERIFY_BAD;
826	switch (HPAGE_PTYPE(hk)) {
828	if (!LF_ISSET(DB_AGGRESSIVE))
830	err_ret = DB_VERIFY_BAD;
	/* Plain on-page item: copy out and print it. */
833	keydata:	memcpy(buf, HKEYDATA_DATA(hk), len);
836	if ((ret = __db_prdbt(&dbt,
837	    0, " ", handle, callback, 0, NULL)) != 0)
	/* Overflow item: chase the offpage chain with __db_safe_goff. */
841	if (len < HOFFPAGE_SIZE) {
842	err_ret = DB_VERIFY_BAD;
846	    HOFFPAGE_PGNO(hk), sizeof(dpgno));
847	if ((ret = __db_safe_goff(dbp, vdp,
848	    dpgno, &dbt, &buf, flags)) != 0) {
850	(void)__db_prdbt(&unkdbt, 0, " ",
851	    handle, callback, 0, NULL);
854	if ((ret = __db_prdbt(&dbt,
855	    0, " ", handle, callback, 0, NULL)) != 0)
	/* Offpage duplicate item. */
859	if (len < HOFFPAGE_SIZE) {
860	err_ret = DB_VERIFY_BAD;
864	    HOFFPAGE_PGNO(hk), sizeof(dpgno));
865	/* UNKNOWN iff pgno is bad or we're a key. */
866	if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
867	if ((ret = __db_prdbt(&unkdbt, 0, " ",
868	    handle, callback, 0, NULL)) != 0)
870	} else if ((ret = __db_salvage_duptree(dbp,
871	    vdp, dpgno, &dbt, handle, callback,
872	    flags | SA_SKIPFIRSTKEY)) != 0)
877	* We're a key; printing dups will seriously
878	* foul the output. If we're being aggressive,
879	* pretend this is a key and let the app.
880	* programmer sort out the mess.
884	if (LF_ISSET(DB_AGGRESSIVE))
889	/* Too small to have any data. */
891	    HKEYDATA_SIZE(2 * sizeof(db_indx_t))) {
892	err_ret = DB_VERIFY_BAD;
896	/* Loop until we hit the total length. */
	/* On-page dup set: each dup is dlen-data-dlen; walk the series. */
897	for (tlen = 0; tlen + sizeof(db_indx_t) < len;
899	tlen += sizeof(db_indx_t);
900	memcpy(&dlen, hk, sizeof(db_indx_t));
902	* If dlen is too long, print all the
903	* rest of the dup set in a chunk.
905	if (dlen + tlen > len)
907	memcpy(buf, hk + tlen, dlen);
910	if ((ret = __db_prdbt(&dbt, 0, " ",
911	    handle, callback, 0, NULL)) != 0)
913	tlen += sizeof(db_indx_t);
	/* Mark the page done; report the worst error we saw. */
921	if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
923	return ((ret == 0 && err_ret != 0) ? err_ret : ret);
927 * __ham_meta2pgset --
928 * Return the set of hash pages corresponding to the given
929 * known-good meta page.
931 * PUBLIC: int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
934 int __ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
/*
 * NOTE(review): truncated view -- intermediate lines are missing.
 *
 * Populates `pgset` with every page reachable from the known-good
 * hash meta page `hmeta` by walking each bucket's page chain,
 * guarding against cycles (via pgset membership) and against
 * chains longer than the file (via totpgs vs. last_pgno).
 */
943	u_int32_t bucket, totpgs;
947	* We don't really need flags, but leave them for consistency with
952	DB_ASSERT(pgset != NULL);
957	* Loop through all the buckets, pushing onto pgset the corresponding
958	* page(s) for each one.
960	for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
961	pgno = BS_TO_PAGE(bucket, hmeta->spares);
964	* We know the initial pgno is safe because the spares array has
967	* Safely walk the list of pages in this bucket.
970	if ((ret = memp_fget(dbp->mpf, &pgno, 0, &h)) != 0)
972	if (TYPE(h) == P_HASH) {
975	* Make sure we don't go past the end of
	/* A chain longer than the file implies a cycle or corruption. */
978	if (++totpgs > vdp->last_pgno) {
979	(void)memp_fput(dbp->mpf, h, 0);
980	return (DB_VERIFY_BAD);
983	    __db_vrfy_pgset_inc(pgset, pgno)) != 0)
990	if ((ret = memp_fput(dbp->mpf, h, 0)) != 0)
993	/* If the new pgno is wonky, go onto the next bucket. */
994	if (!IS_VALID_PGNO(pgno) ||
995	    pgno == PGNO_INVALID)
999	* If we've touched this page before, we have a cycle;
1000	* go on to the next bucket.
1002	if ((ret = __db_vrfy_pgset_get(pgset, pgno, &val)) != 0)
1013 * __ham_dups_unsorted --
1014 * Takes a known-safe hash duplicate set and its total length.
1015 * Returns 1 if there are out-of-order duplicates in this set,
1016 * 0 if there are not.
1019 __ham_dups_unsorted(dbp, buf, len)
1025 db_indx_t offset, dlen;
1026 int (*func) __P((DB *, const DBT *, const DBT *));
1028 memset(&a, 0, sizeof(DBT));
1029 memset(&b, 0, sizeof(DBT));
1031 func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;
1034 * Loop through the dup set until we hit the end or we find
1035 * a pair of dups that's out of order. b is always the current
1036 * dup, a the one before it.
1038 for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
1039 memcpy(&dlen, buf + offset, sizeof(db_indx_t));
1040 b.data = buf + offset + sizeof(db_indx_t);
1043 if (a.data != NULL && func(dbp, &a, &b) > 0)