#endif
#endif
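+/* The online backup API (sqlite3_backup_init() and friends) first appeared
+   in SQLite 3.6.11. */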
+#if SQLITE_VERSION_NUMBER >= 3006011
+#define HAVE_BACKUP_API
+#endif
+
_Py_IDENTIFIER(cursor);
static const char * const begin_statements[] = {
};
char* database;
+ PyObject* database_obj;
int detect_types = 0;
PyObject* isolation_level = NULL;
PyObject* factory = NULL;
double timeout = 5.0;
int rc;
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|diOiOipi", kwlist,
- &database, &timeout, &detect_types,
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&|diOiOipi", kwlist,
+ PyUnicode_FSConverter, &database_obj, &timeout, &detect_types,
&isolation_level, &check_same_thread,
&factory, &cached_statements, &uri,
&flags))
return -1;
}
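+    /* Borrowed pointer into the bytes object produced by
+       PyUnicode_FSConverter(); it stays valid until the
+       Py_DECREF(database_obj) below. */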
+ database = PyBytes_AsString(database_obj);
+
self->initialized = 1;
self->begin_statement = NULL;
- self->statement_cache = NULL;
- self->statements = NULL;
- self->cursors = NULL;
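+    /* __init__ may be invoked again on an existing connection; release any
+       previously-set references instead of overwriting them. */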
+ Py_CLEAR(self->statement_cache);
+ Py_CLEAR(self->statements);
+ Py_CLEAR(self->cursors);
Py_INCREF(Py_None);
- self->row_factory = Py_None;
+ Py_XSETREF(self->row_factory, Py_None);
Py_INCREF(&PyUnicode_Type);
- self->text_factory = (PyObject*)&PyUnicode_Type;
+ Py_XSETREF(self->text_factory, (PyObject*)&PyUnicode_Type);
#ifdef SQLITE_OPEN_URI
Py_BEGIN_ALLOW_THREADS
#endif
Py_END_ALLOW_THREADS
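+    /* Release the bytes object created by PyUnicode_FSConverter(); the
+       "database" pointer must not be used past this point. */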
+ Py_DECREF(database_obj);
+
if (rc != SQLITE_OK) {
_pysqlite_seterror(self->db, NULL);
return -1;
} else {
Py_INCREF(isolation_level);
}
- self->isolation_level = NULL;
+ Py_CLEAR(self->isolation_level);
if (pysqlite_connection_set_isolation_level(self, isolation_level) < 0) {
Py_DECREF(isolation_level);
return -1;
self->detect_types = detect_types;
self->timeout = timeout;
(void)sqlite3_busy_timeout(self->db, (int)(timeout*1000));
-#ifdef WITH_THREAD
self->thread_ident = PyThread_get_thread_ident();
-#endif
if (!check_same_thread && sqlite3_libversion_number() < 3003001) {
PyErr_SetString(pysqlite_NotSupportedError, "shared connections not available");
return -1;
}
self->check_same_thread = check_same_thread;
- self->function_pinboard = PyDict_New();
+ Py_XSETREF(self->function_pinboard, PyDict_New());
if (!self->function_pinboard) {
return -1;
}
- self->collations = PyDict_New();
+ Py_XSETREF(self->collations, PyDict_New());
if (!self->collations) {
return -1;
}
/* Clean up if user has not called .close() explicitly. */
if (self->db) {
Py_BEGIN_ALLOW_THREADS
- sqlite3_close(self->db);
+ SQLITE3_CLOSE(self->db);
Py_END_ALLOW_THREADS
}
if (self->db) {
Py_BEGIN_ALLOW_THREADS
- rc = sqlite3_close(self->db);
+ rc = SQLITE3_CLOSE(self->db);
Py_END_ALLOW_THREADS
if (rc != SQLITE_OK) {
if (PyErr_Occurred()) {
return NULL;
} else {
- Py_INCREF(Py_None);
- return Py_None;
+ Py_RETURN_NONE;
}
}
if (PyErr_Occurred()) {
return NULL;
} else {
- Py_INCREF(Py_None);
- return Py_None;
+ Py_RETURN_NONE;
}
}
if (PyErr_Occurred()) {
return NULL;
} else {
- Py_INCREF(Py_None);
- return Py_None;
+ Py_RETURN_NONE;
}
}
PyObject* py_retval = NULL;
int ok;
-#ifdef WITH_THREAD
PyGILState_STATE threadstate;
threadstate = PyGILState_Ensure();
-#endif
py_func = (PyObject*)sqlite3_user_data(context);
_sqlite3_result_error(context, "user-defined function raised exception", -1);
}
-#ifdef WITH_THREAD
PyGILState_Release(threadstate);
-#endif
}
static void _pysqlite_step_callback(sqlite3_context *context, int argc, sqlite3_value** params)
PyObject** aggregate_instance;
PyObject* stepmethod = NULL;
-#ifdef WITH_THREAD
PyGILState_STATE threadstate;
threadstate = PyGILState_Ensure();
-#endif
aggregate_class = (PyObject*)sqlite3_user_data(context);
aggregate_instance = (PyObject**)sqlite3_aggregate_context(context, sizeof(PyObject*));
- if (*aggregate_instance == 0) {
- *aggregate_instance = PyObject_CallFunction(aggregate_class, NULL);
+ if (*aggregate_instance == NULL) {
+ *aggregate_instance = _PyObject_CallNoArg(aggregate_class);
if (PyErr_Occurred()) {
*aggregate_instance = 0;
Py_XDECREF(stepmethod);
Py_XDECREF(function_result);
-#ifdef WITH_THREAD
PyGILState_Release(threadstate);
-#endif
}
void _pysqlite_final_callback(sqlite3_context* context)
PyObject *exception, *value, *tb;
int restore;
-#ifdef WITH_THREAD
PyGILState_STATE threadstate;
threadstate = PyGILState_Ensure();
-#endif
aggregate_instance = (PyObject**)sqlite3_aggregate_context(context, sizeof(PyObject*));
if (!*aggregate_instance) {
}
error:
-#ifdef WITH_THREAD
PyGILState_Release(threadstate);
-#endif
- /* explicit return to avoid a compilation error if WITH_THREAD
- is not defined */
- return;
}
static void _pysqlite_drop_unused_statement_references(pysqlite_Connection* self)
{
PyObject *ret;
int rc;
-#ifdef WITH_THREAD
PyGILState_STATE gilstate;
gilstate = PyGILState_Ensure();
-#endif
ret = PyObject_CallFunction((PyObject*)user_arg, "issss", action, arg1, arg2, dbname, access_attempt_source);
Py_DECREF(ret);
}
-#ifdef WITH_THREAD
PyGILState_Release(gilstate);
-#endif
return rc;
}
{
int rc;
PyObject *ret;
-#ifdef WITH_THREAD
PyGILState_STATE gilstate;
gilstate = PyGILState_Ensure();
-#endif
- ret = PyObject_CallFunction((PyObject*)user_arg, NULL);
+ ret = _PyObject_CallNoArg((PyObject*)user_arg);
if (!ret) {
if (_enable_callback_tracebacks) {
Py_DECREF(ret);
}
-#ifdef WITH_THREAD
PyGILState_Release(gilstate);
-#endif
return rc;
}
PyObject *py_statement = NULL;
PyObject *ret = NULL;
-#ifdef WITH_THREAD
PyGILState_STATE gilstate;
gilstate = PyGILState_Ensure();
-#endif
py_statement = PyUnicode_DecodeUTF8(statement_string,
strlen(statement_string), "replace");
if (py_statement) {
}
}
-#ifdef WITH_THREAD
PyGILState_Release(gilstate);
-#endif
}
static PyObject* pysqlite_connection_set_authorizer(pysqlite_Connection* self, PyObject* args, PyObject* kwargs)
int pysqlite_check_thread(pysqlite_Connection* self)
{
-#ifdef WITH_THREAD
if (self->check_same_thread) {
if (PyThread_get_thread_ident() != self->thread_ident) {
PyErr_Format(pysqlite_ProgrammingError,
- "SQLite objects created in a thread can only be used in that same thread."
- "The object was created in thread id %ld and this is thread id %ld",
+ "SQLite objects created in a thread can only be used in that same thread. "
+ "The object was created in thread id %lu and this is thread id %lu.",
self->thread_ident, PyThread_get_thread_ident());
return 0;
}
}
-#endif
return 1;
}
return NULL;
}
- if (!_PyArg_NoKeywords(MODULE_NAME ".Connection()", kwargs))
+ if (!_PyArg_NoKeywords(MODULE_NAME ".Connection", kwargs))
return NULL;
if (!PyArg_ParseTuple(args, "O", &sql))
PyObject* callback = (PyObject*)context;
PyObject* string1 = 0;
PyObject* string2 = 0;
-#ifdef WITH_THREAD
PyGILState_STATE gilstate;
-#endif
PyObject* retval = NULL;
long longval;
int result = 0;
-#ifdef WITH_THREAD
gilstate = PyGILState_Ensure();
-#endif
if (PyErr_Occurred()) {
goto finally;
Py_XDECREF(string1);
Py_XDECREF(string2);
Py_XDECREF(retval);
-#ifdef WITH_THREAD
PyGILState_Release(gilstate);
-#endif
return result;
}
return retval;
}
+#ifdef HAVE_BACKUP_API
+static PyObject *
+pysqlite_connection_backup(pysqlite_Connection *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *target = NULL;
+ int pages = -1;
+ PyObject *progress = Py_None;
+ const char *name = "main";
+ int rc;
+ int callback_error = 0;
+ PyObject *sleep_obj = NULL;
+ int sleep_ms = 250;
+ sqlite3 *bck_conn;
+ sqlite3_backup *bck_handle;
+ static char *keywords[] = {"target", "pages", "progress", "name", "sleep", NULL};
+
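+    /* "O!" requires target to be a Connection instance; the "$" marker makes
+       pages, progress, name and sleep keyword-only. */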
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|$iOsO:backup", keywords,
+ &pysqlite_ConnectionType, &target,
+ &pages, &progress, &name, &sleep_obj)) {
+ return NULL;
+ }
+
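+    /* "sleep" is a number of seconds (int or float); convert it to whole
+       milliseconds for sqlite3_sleep(). */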
+ if (sleep_obj != NULL) {
+ _PyTime_t sleep_secs;
+ if (_PyTime_FromSecondsObject(&sleep_secs, sleep_obj,
+ _PyTime_ROUND_CEILING)) {
+ return NULL;
+ }
+ _PyTime_t ms = _PyTime_AsMilliseconds(sleep_secs,
+ _PyTime_ROUND_CEILING);
+ if (ms < INT_MIN || ms > INT_MAX) {
+ PyErr_SetString(PyExc_OverflowError, "sleep is too large");
+ return NULL;
+ }
+ sleep_ms = (int)ms;
+ }
+
+ if (!pysqlite_check_connection((pysqlite_Connection *)target)) {
+ return NULL;
+ }
+
+ if ((pysqlite_Connection *)target == self) {
+ PyErr_SetString(PyExc_ValueError, "target cannot be the same connection instance");
+ return NULL;
+ }
+
+#if SQLITE_VERSION_NUMBER < 3008008
+ /* Since 3.8.8 this is already done, per commit
+ https://www.sqlite.org/src/info/169b5505498c0a7e */
+ if (!sqlite3_get_autocommit(((pysqlite_Connection *)target)->db)) {
+ PyErr_SetString(pysqlite_OperationalError, "target is in transaction");
+ return NULL;
+ }
+#endif
+
+ if (progress != Py_None && !PyCallable_Check(progress)) {
+ PyErr_SetString(PyExc_TypeError, "progress argument must be a callable");
+ return NULL;
+ }
+
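+    /* sqlite3_backup_step() copies all remaining pages when given a
+       negative page count. */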
+ if (pages == 0) {
+ pages = -1;
+ }
+
+ bck_conn = ((pysqlite_Connection *)target)->db;
+
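+    /* The destination is always the "main" database of the target
+       connection; "name" selects which database of this connection
+       is copied. */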
+ Py_BEGIN_ALLOW_THREADS
+ bck_handle = sqlite3_backup_init(bck_conn, "main", self->db, name);
+ Py_END_ALLOW_THREADS
+
+ if (bck_handle) {
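+        /* Copy up to "pages" pages per step, retrying while the source
+           database is busy or locked, until the backup finishes or fails. */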
+ do {
+ Py_BEGIN_ALLOW_THREADS
+ rc = sqlite3_backup_step(bck_handle, pages);
+ Py_END_ALLOW_THREADS
+
+ if (progress != Py_None) {
+ PyObject *res;
+
+ res = PyObject_CallFunction(progress, "iii", rc,
+ sqlite3_backup_remaining(bck_handle),
+ sqlite3_backup_pagecount(bck_handle));
+ if (res == NULL) {
+ /* User's callback raised an error: interrupt the loop and
+ propagate it. */
+ callback_error = 1;
+ rc = -1;
+ } else {
+ Py_DECREF(res);
+ }
+ }
+
+ /* Sleep for a while if there are still further pages to copy and
+ the engine could not make any progress */
+ if (rc == SQLITE_BUSY || rc == SQLITE_LOCKED) {
+ Py_BEGIN_ALLOW_THREADS
+ sqlite3_sleep(sleep_ms);
+ Py_END_ALLOW_THREADS
+ }
+ } while (rc == SQLITE_OK || rc == SQLITE_BUSY || rc == SQLITE_LOCKED);
+
+ Py_BEGIN_ALLOW_THREADS
+ rc = sqlite3_backup_finish(bck_handle);
+ Py_END_ALLOW_THREADS
+ } else {
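+        /* sqlite3_backup_init() failed; SQLite reports the error on the
+           destination connection handle. */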
+ rc = _pysqlite_seterror(bck_conn, NULL);
+ }
+
+ if (!callback_error && rc != SQLITE_OK) {
+ /* We cannot use _pysqlite_seterror() here because the backup APIs do
+ not set the error status on the connection object, but rather on
+ the backup handle. */
+ if (rc == SQLITE_NOMEM) {
+ (void)PyErr_NoMemory();
+ } else {
+#if SQLITE_VERSION_NUMBER > 3007015
+ PyErr_SetString(pysqlite_OperationalError, sqlite3_errstr(rc));
+#else
+ switch (rc) {
+ case SQLITE_READONLY:
+ PyErr_SetString(pysqlite_OperationalError,
+ "attempt to write a readonly database");
+ break;
+ case SQLITE_BUSY:
+ PyErr_SetString(pysqlite_OperationalError, "database is locked");
+ break;
+ case SQLITE_LOCKED:
+ PyErr_SetString(pysqlite_OperationalError,
+ "database table is locked");
+ break;
+ default:
+ PyErr_Format(pysqlite_OperationalError,
+ "unrecognized error code: %d", rc);
+ break;
+ }
+#endif
+ }
+ }
+
+ if (!callback_error && rc == SQLITE_OK) {
+ Py_RETURN_NONE;
+ } else {
+ return NULL;
+ }
+}
+#endif
+
static PyObject *
pysqlite_connection_create_collation(pysqlite_Connection* self, PyObject* args)
{
PyObject* retval;
Py_ssize_t i, len;
_Py_IDENTIFIER(upper);
- char *uppercase_name_str;
+ const char *uppercase_name_str;
int rc;
unsigned int kind;
void *data;
pysqlite_connection_exit(pysqlite_Connection* self, PyObject* args)
{
PyObject* exc_type, *exc_value, *exc_tb;
- char* method_name;
+ const char* method_name;
PyObject* result;
if (!PyArg_ParseTuple(args, "OOO", &exc_type, &exc_value, &exc_tb)) {
PyDoc_STR("Abort any pending database operation. Non-standard.")},
{"iterdump", (PyCFunction)pysqlite_connection_iterdump, METH_NOARGS,
PyDoc_STR("Returns iterator to the dump of the database in an SQL text format. Non-standard.")},
+#ifdef HAVE_BACKUP_API
+    {"backup", (PyCFunction)pysqlite_connection_backup, METH_VARARGS | METH_KEYWORDS,
+        PyDoc_STR("Makes a backup of the database. Non-standard.")},
+#endif
{"__enter__", (PyCFunction)pysqlite_connection_enter, METH_NOARGS,
PyDoc_STR("For context manager. Non-standard.")},
{"__exit__", (PyCFunction)pysqlite_connection_exit, METH_VARARGS,
/* static objects at module-level */
-PyObject* pysqlite_Error, *pysqlite_Warning, *pysqlite_InterfaceError, *pysqlite_DatabaseError,
- *pysqlite_InternalError, *pysqlite_OperationalError, *pysqlite_ProgrammingError,
- *pysqlite_IntegrityError, *pysqlite_DataError, *pysqlite_NotSupportedError;
-
-PyObject* converters;
-int _enable_callback_tracebacks;
-int pysqlite_BaseTypeAdapted;
+PyObject *pysqlite_Error = NULL;
+PyObject *pysqlite_Warning = NULL;
+PyObject *pysqlite_InterfaceError = NULL;
+PyObject *pysqlite_DatabaseError = NULL;
+PyObject *pysqlite_InternalError = NULL;
+PyObject *pysqlite_OperationalError = NULL;
+PyObject *pysqlite_ProgrammingError = NULL;
+PyObject *pysqlite_IntegrityError = NULL;
+PyObject *pysqlite_DataError = NULL;
+PyObject *pysqlite_NotSupportedError = NULL;
+
+PyObject* converters = NULL;
+int _enable_callback_tracebacks = 0;
+int pysqlite_BaseTypeAdapted = 0;
static PyObject* module_connect(PyObject* self, PyObject* args, PyObject*
kwargs)
"check_same_thread", "factory", "cached_statements", "uri", "flags",
NULL
};
- char* database;
+ PyObject* database;
int detect_types = 0;
PyObject* isolation_level;
PyObject* factory = NULL;
PyObject* result;
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|diOiOipi", kwlist,
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|diOiOipi", kwlist,
&database, &timeout, &detect_types,
&isolation_level, &check_same_thread,
&factory, &cached_statements, &uri,
{"SQLITE_DROP_TRIGGER", SQLITE_DROP_TRIGGER},
{"SQLITE_DROP_VIEW", SQLITE_DROP_VIEW},
{"SQLITE_INSERT", SQLITE_INSERT},
+ {"SQLITE_OPEN_CREATE", SQLITE_OPEN_CREATE},
+ {"SQLITE_OPEN_FULLMUTEX", SQLITE_OPEN_FULLMUTEX},
+ {"SQLITE_OPEN_MEMORY", SQLITE_OPEN_MEMORY},
+ {"SQLITE_OPEN_NOMUTEX", SQLITE_OPEN_NOMUTEX},
+ {"SQLITE_OPEN_PRIVATECACHE", SQLITE_OPEN_PRIVATECACHE},
+ {"SQLITE_OPEN_READONLY", SQLITE_OPEN_READONLY},
+ {"SQLITE_OPEN_SHAREDCACHE", SQLITE_OPEN_SHAREDCACHE},
+ {"SQLITE_OPEN_READWRITE", SQLITE_OPEN_READWRITE},
+ {"SQLITE_OPEN_URI", SQLITE_OPEN_URI},
{"SQLITE_PRAGMA", SQLITE_PRAGMA},
{"SQLITE_READ", SQLITE_READ},
{"SQLITE_SELECT", SQLITE_SELECT},
#endif
#if SQLITE_VERSION_NUMBER >= 3003000
{"SQLITE_ANALYZE", SQLITE_ANALYZE},
+#endif
+#if SQLITE_VERSION_NUMBER >= 3003007
+ {"SQLITE_CREATE_VTABLE", SQLITE_CREATE_VTABLE},
+ {"SQLITE_DROP_VTABLE", SQLITE_DROP_VTABLE},
+#endif
+#if SQLITE_VERSION_NUMBER >= 3003008
+ {"SQLITE_FUNCTION", SQLITE_FUNCTION},
+#endif
+#if SQLITE_VERSION_NUMBER >= 3006008
+ {"SQLITE_SAVEPOINT", SQLITE_SAVEPOINT},
+#endif
+#if SQLITE_VERSION_NUMBER >= 3008003
+ {"SQLITE_RECURSIVE", SQLITE_RECURSIVE},
+#endif
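+/* sqlite3_backup_step() returns SQLITE_DONE once the copy is complete. */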
+#if SQLITE_VERSION_NUMBER >= 3006011
+ {"SQLITE_DONE", SQLITE_DONE},
#endif
{(char*)NULL, 0}
};
-static struct PyModuleDef _sqlitemodule = {
+static struct PyModuleDef _sqlite3module = {
PyModuleDef_HEAD_INIT,
- "_sqlite",
+ "_sqlite3",
NULL,
-1,
module_methods,
NULL
};
-PyMODINIT_FUNC PyInit__sqlite(void)
+PyMODINIT_FUNC PyInit__sqlite3(void)
{
PyObject *module, *dict;
PyObject *tmp_obj;
int i;
- module = PyModule_Create(&_sqlitemodule);
+ module = PyModule_Create(&_sqlite3module);
if (!module ||
(pysqlite_row_setup_types() < 0) ||
PyDict_SetItemString(dict, "OptimizedUnicode", (PyObject*)&PyUnicode_Type);
/* Set integer constants */
- for (i = 0; _int_constants[i].constant_name != 0; i++) {
+ for (i = 0; _int_constants[i].constant_name != NULL; i++) {
tmp_obj = PyLong_FromLong(_int_constants[i].constant_value);
if (!tmp_obj) {
goto error;
/* initialize the default converters */
converters_init(dict);
- _enable_callback_tracebacks = 0;
-
- pysqlite_BaseTypeAdapted = 0;
-
- /* Original comment from _bsddb.c in the Python core. This is also still
- * needed nowadays for Python 2.3/2.4.
- *
- * PyEval_InitThreads is called here due to a quirk in python 1.5
- * - 2.2.1 (at least) according to Russell Williamson <merel@wt.net>:
- * The global interpreter lock is not initialized until the first
- * thread is created using thread.start_new_thread() or fork() is
- * called. that would cause the ALLOW_THREADS here to segfault due
- * to a null pointer reference if no threads or child processes
- * have been created. This works around that and is a no-op if
- * threads have already been initialized.
- * (see pybsddb-users mailing list post on 2002-08-07)
- */
-#ifdef WITH_THREAD
- PyEval_InitThreads();
-#endif
-
error:
if (PyErr_Occurred())
{