/* 40 = 4.0, 33 = 3.3; this will break if the second number is > 9 */
#define DBVER (DB_VERSION_MAJOR * 10 + DB_VERSION_MINOR)
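+/* For example, DB 4.1 gives DBVER == 4*10 + 1 == 41, which is what
+ * two-digit guards like "#if (DBVER >= 41)" below compare against. */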
-#define PY_BSDDB_VERSION "3.3.1"
+#define PY_BSDDB_VERSION "3.4.2"
-static char *rcs_id = "$Id: _rpmdb.c,v 1.5 2002/08/14 21:35:35 jbj Exp $";
+static char *rcs_id = "$Id: _rpmdb.c,v 1.6 2002/11/06 16:46:54 jbj Exp $";
#ifdef WITH_THREAD
secondaryDB->associateCallback = callback;
secondaryDB->primaryDBType = _DB_get_type(self);
-
+ /* PyEval_InitThreads is called here due to a quirk in python 1.5
+ * - 2.2.1 (at least) according to Russell Williamson <merel@wt.net>:
+ * The global interpreter lock is not initialized until the first
+ * thread is created using thread.start_new_thread() or fork() is
+ * called. That would cause the ALLOW_THREADS here to segfault due
+ * to a null pointer reference if no threads or child processes
+ * have been created. This works around that and is a no-op if
+ * threads have already been initialized.
+ * (see pybsddb-users mailing list post on 2002-08-07)
+ */
+ PyEval_InitThreads();
MYDB_BEGIN_ALLOW_THREADS;
#if (DBVER >= 41)
err = self->db->associate(self->db, NULL,
MAKE_HASH_ENTRY(dup);
MAKE_HASH_ENTRY(dup_free);
break;
+#endif
case DB_BTREE:
case DB_RECNO:
MAKE_QUEUE_ENTRY(re_len);
MAKE_QUEUE_ENTRY(re_pad);
MAKE_QUEUE_ENTRY(pgfree);
-#if (DBVER >= 31) && (DBVER < 40)
+#if (DBVER == 31)
MAKE_QUEUE_ENTRY(start);
#endif
MAKE_QUEUE_ENTRY(first_recno);
if (!PyArg_ParseTuple(args, ":close"))
return NULL;
- CHECK_CURSOR_NOT_CLOSED(self);
-
if (self->dbc != NULL) {
MYDB_BEGIN_ALLOW_THREADS;
err = self->dbc->c_close(self->dbc);
static PyObject*
DBC_get(DBCursorObject* self, PyObject* args, PyObject *kwargs)
{
- int err, flags;
+ int err, flags=0;
PyObject* keyobj = NULL;
PyObject* dataobj = NULL;
PyObject* retval = NULL;
int err;
DB_LOCK_STAT* sp;
PyObject* d = NULL;
- u_int32_t flags;
+ u_int32_t flags = 0;
if (!PyArg_ParseTuple(args, "|i:lock_stat", &flags))
return NULL;
int err;
DB_TXN_STAT* sp;
PyObject* d = NULL;
- u_int32_t flags;
+ u_int32_t flags=0;
if (!PyArg_ParseTuple(args, "|i:txn_stat", &flags))
return NULL;
flags = _checkflag(flag)
d = _db.DB()
d.set_flags(hflags)
- if cachesize is not None: d.set_cachesize(cachesize)
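+    # DB.set_cachesize() takes (gbytes, bytes[, ncache]); the old
+    # one-argument call was invalid, so pass 0 gigabytes explicitly.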
+ if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
if ffactor is not None: d.set_h_ffactor(ffactor)
flags = _checkflag(flag)
d = _db.DB()
- if cachesize is not None: d.set_cachesize(cachesize)
+ if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(btflags)
flags = _checkflag(flag)
d = _db.DB()
- if cachesize is not None: d.set_cachesize(cachesize)
+ if cachesize is not None: d.set_cachesize(0, cachesize)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(rnflags)
if type(flags) == type(''):
sflag = flags
if sflag == 'r':
- flags = db.DB_READONLY
+ flags = db.DB_RDONLY
elif sflag == 'rw':
flags = 0
elif sflag == 'w':
#-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
+# Copyright (C) 2002 Gregory P. Smith
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
def __call__(self, s):
return s[:len(self.prefix)] == self.prefix
+class PostfixCond(Cond):
+ """Acts as a condition function for matching a string postfix"""
+ def __init__(self, postfix):
+ self.postfix = postfix
+ def __call__(self, s):
+ return s[-len(self.postfix):] == self.postfix
+
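+# A minimal usage sketch (illustrative only, not part of the module):
+#
+#     cond = PostfixCond('.db')
+#     cond('mytable.db')    # true:  'mytable.db' ends with '.db'
+#     cond('mytable.txt')   # false
+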
class LikeCond(Cond):
"""
Acts as a function that will match using an SQL 'LIKE' style
# if no condition was specified or the condition
# succeeds, add row to our match list.
if not condition or condition(data) :
- # only create new entries in matcing_rowids on
- # the first pass, otherwise reject the
- # rowid as it must not have matched
- # the previous passes
- if column_num == 0 :
- if not matching_rowids.has_key(rowid) :
- matching_rowids[rowid] = {}
- if savethiscolumndata :
- matching_rowids[rowid][column] = data
- else :
- rejected_rowids[rowid] = rowid
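+                        # Create/update the entry for any matching rowid;
+                        # the old first-pass-only gate wrongly rejected
+                        # rows when conditions were evaluated in a
+                        # different order (sourceforge bug #590449).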
+ if not matching_rowids.has_key(rowid) :
+ matching_rowids[rowid] = {}
+ if savethiscolumndata :
+ matching_rowids[rowid][column] = data
else :
if matching_rowids.has_key(rowid) :
del matching_rowids[rowid]
#------------------------------------------------------------------------
#
-# In my performance tests, using this (as in dbtest.py test4) is
-# slightly slower than simply compiling _db.c with MYDB_THREAD
-# undefined to prevent multithreading support in the C module.
-# Using NoDeadlockDb also prevent deadlocks from mutliple processes
-# accessing the same database.
-#
# Copyright (C) 2000 Autonomous Zone Industries
#
# License: This is free software. You may use this software for any
# Author: Gregory P. Smith <greg@electricrain.com>
#
# Note: I don't know how useful this is in reality since when a
-# DBDeadlockError happens the current transaction is supposed to be
+# DBLockDeadlockError happens the current transaction is supposed to be
# aborted. If it doesn't then when the operation is attempted again
# the deadlock is still happening...
# --Robin
import _rpmdb as _db
-_deadlock_MinSleepTime = 1.0/64 # always sleep at least N seconds between retrys
-_deadlock_MaxSleepTime = 1.0 # never sleep more than N seconds between retrys
+_deadlock_MinSleepTime = 1.0/64 # always sleep at least N seconds between retries
+_deadlock_MaxSleepTime = 3.14159 # never sleep more than N seconds between retries
+_deadlock_VerboseFile = None  # Assign a file object to this to have a
+                              # "sleeping" message written to it on each retry
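+                              # (e.g. sys.stdout, as test_thread.py's setUp does)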
def DeadlockWrap(function, *_args, **_kwargs):
"""DeadlockWrap(function, *_args, **_kwargs) - automatically retries
function in case of a database deadlock.
- This is a DeadlockWrapper method which DB calls can be made using to
- preform infinite retrys with sleeps in between when a DBLockDeadlockError
- exception is raised in a database call:
+    This function is intended to wrap database calls so that they are
+    retried, with exponentially backed-off sleeps in between, whenever
+    a DBLockDeadlockError exception is raised.
+
+    A 'max_retries' keyword parameter may optionally be passed to keep
+    it from retrying forever; once the retries are exhausted, the
+    DBLockDeadlockError is reraised.
d = DB(...)
d.open(...)
DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar"
"""
sleeptime = _deadlock_MinSleepTime
- while (1) :
+ max_retries = _kwargs.get('max_retries', -1)
+ if _kwargs.has_key('max_retries'):
+ del _kwargs['max_retries']
+ while 1:
try:
return apply(function, _args, _kwargs)
except _db.DBLockDeadlockError:
- print 'DeadlockWrap sleeping ', sleeptime
+ if _deadlock_VerboseFile:
+ _deadlock_VerboseFile.write('dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
_sleep(sleeptime)
# exponential backoff in the sleep time
sleeptime = sleeptime * 2
if sleeptime > _deadlock_MaxSleepTime :
sleeptime = _deadlock_MaxSleepTime
+ max_retries = max_retries - 1
+ if max_retries == -1:
+ raise
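+# Usage sketch (illustrative): bound the retries rather than looping
+# forever; once max_retries is exhausted the DBLockDeadlockError is
+# reraised:
+#
+#     data = DeadlockWrap(d.get, "foo", max_retries=12)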
#------------------------------------------------------------------------
class HashShelveTestCase(BasicShelveTestCase):
- dbtype = db.DB_BTREE
+ dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class ThreadHashShelveTestCase(BasicShelveTestCase):
- dbtype = db.DB_BTREE
+ dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
class EnvHashShelveTestCase(BasicEnvShelveTestCase):
envflags = 0
- dbtype = db.DB_BTREE
+ dbtype = db.DB_HASH
dbflags = db.DB_CREATE
class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
envflags = db.DB_THREAD
- dbtype = db.DB_BTREE
+ dbtype = db.DB_HASH
dbflags = db.DB_CREATE | db.DB_THREAD
#-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
+# Copyright (C) 2002 Gregory P. Smith
#
# March 20, 2000
#
assert values[0]['b'] == "bad"
+ def test04_MultiCondSelect(self):
+ tabname = "test04_MultiCondSelect"
+ try:
+ self.tdb.Drop(tabname)
+ except dbtables.TableDBError:
+ pass
+ self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
+
+ try:
+ self.tdb.Insert(tabname, {'a': "", 'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1), 'f': "Zero"})
+ assert 0
+ except dbtables.TableDBError:
+ pass
+
+ self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D", 'e': "E"})
+ self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D", 'e': "-E"})
+ self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-", 'e': "E-"})
+
+ if verbose:
+ self.tdb._db_print()
+
+        # This select should return 0 rows. It is designed to test
+        # the bug identified and fixed in sourceforge bug #590449
+        # (big thanks to "Rob Tillotson (n9mtb)" for tracking it down
+        # and supplying a fix!  This one caused many headaches, to say
+        # the least...)
+ values = self.tdb.Select(tabname, ['b', 'a', 'd'],
+ conditions={'e': dbtables.ExactCond('E'),
+ 'a': dbtables.ExactCond('A'),
+ 'd': dbtables.PrefixCond('-')
+ } )
+ assert len(values) == 0, values
+
+
def test_CreateOrExtend(self):
tabname = "test_CreateOrExtend"
import unittest
from test_all import verbose
-from rpmdb import db
+from rpmdb import db, dbutils
#----------------------------------------------------------------------
def setUp(self):
+ if verbose:
+ dbutils._deadlock_VerboseFile = sys.stdout
+
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir
try: os.mkdir(homeDir)
for x in range(start, stop):
key = '%04d' % x
- d.put(key, self.makeData(key))
+ dbutils.DeadlockWrap(d.put, key, self.makeData(key), max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
# create a bunch of records
for x in xrange(start, stop):
key = '%04d' % x
- d.put(key, self.makeData(key))
+ dbutils.DeadlockWrap(d.put, key, self.makeData(key), max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
if random() <= 0.05:
for y in xrange(start, x):
                    key = '%04d' % y
- data = d.get(key)
+ data = dbutils.DeadlockWrap(d.get, key, max_retries=12)
assert data == self.makeData(key)
# flush them
try:
- d.sync()
+ dbutils.DeadlockWrap(d.sync, max_retries=12)
except db.DBIncompleteError, val:
if verbose:
print "could not complete sync()..."
# read them back, deleting a few
for x in xrange(start, stop):
key = '%04d' % x
- data = d.get(key)
+ data = dbutils.DeadlockWrap(d.get, key, max_retries=12)
if verbose and x % 100 == 0:
print "%s: fetched record (%s, %s)" % (name, key, data)
- assert data == self.makeData(key)
+ assert data == self.makeData(key), (key, data, self.makeData(key))
if random() <= 0.10:
- d.delete(key)
+ dbutils.DeadlockWrap(d.delete, key, max_retries=12)
if verbose:
print "%s: deleted record %s" % (name, key)
class HashSimpleThreaded(SimpleThreadedBase):
- dbtype = db.DB_BTREE
+ dbtype = db.DB_HASH
#----------------------------------------------------------------------