|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" |
|
A lightweight wrapper around Python's sqlite3 database, with a dict-like interface |
|
and multi-thread access support:: |
|
|
|
>>> mydict = SqliteDict('some.db', autocommit=True) # the mapping will be persisted to file `some.db` |
|
>>> mydict['some_key'] = any_picklable_object |
|
>>> print(mydict['some_key'])

>>> print(len(mydict))  # etc... all dict functions work
|
|
|
Pickle is used internally to serialize the values. Keys are strings. |
|
|
|
If you don't use autocommit (default is no autocommit for performance), then |
|
don't forget to call `mydict.commit()` when done with a transaction. |
|
|
|
""" |
|
|
|
import sqlite3 |
|
import os |
|
import sys |
|
import tempfile |
|
import threading |
|
import logging |
|
import traceback |
|
from base64 import b64decode, b64encode |
|
import weakref |
|
|
|
__version__ = '2.1.0' |
|
|
|
|
|
def reraise(tp, value, tb=None):
    """Re-raise *value* (an instance of exception type *tp*) with traceback *tb*.

    If *value* is None, a fresh instance of *tp* is created first. When the
    exception does not already carry *tb*, it is attached before raising, so
    the original traceback is preserved across threads.
    """
    exc = tp() if value is None else value
    if exc.__traceback__ is tb:
        raise exc
    raise exc.with_traceback(tb)
|
|
|
|
|
try: |
|
from cPickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL |
|
except ImportError: |
|
from pickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL |
|
|
|
|
|
try: |
|
from collections import UserDict as DictClass |
|
except ImportError: |
|
from UserDict import DictMixin as DictClass |
|
|
|
try: |
|
from queue import Queue |
|
except ImportError: |
|
from Queue import Queue |
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Control tokens exchanged between the caller and the worker thread.
_REQUEST_CLOSE = '--close--'
_REQUEST_COMMIT = '--commit--'
_RESPONSE_NO_MORE = '--no more--'

# Outcomes of _put(): delivered, response queue already garbage-collected,
# or no response queue was requested at all.
_PUT_OK, _PUT_REFERENT_DESTROYED, _PUT_NOOP = 0, 1, 2


def _put(queue_reference, item):
    """Deliver *item* to the queue behind the weak reference *queue_reference*.

    Returns _PUT_NOOP when no reference was supplied, _PUT_REFERENT_DESTROYED
    when the referenced queue has been garbage-collected (the requester is
    gone), and _PUT_OK on successful delivery.
    """
    if queue_reference is None:
        # Caller did not ask for responses; nothing to deliver.
        return _PUT_NOOP

    target_queue = queue_reference()
    if target_queue is None:
        # The requester abandoned its response queue; drop the item.
        return _PUT_REFERENT_DESTROYED

    target_queue.put(item)
    return _PUT_OK
|
|
|
|
|
# NOTE: this module-level `open` shadows the builtin `open` within this module;
# it is kept for backward compatibility as a functional alias of SqliteDict().
def open(*args, **kwargs):
    """See documentation of the SqliteDict class."""
    return SqliteDict(*args, **kwargs)
|
|
|
|
|
def encode(obj):
    """Serialize *obj* with pickle into a BLOB value accepted by SQLite."""
    pickled = dumps(obj, protocol=PICKLE_PROTOCOL)
    return sqlite3.Binary(pickled)
|
|
|
|
|
def decode(obj):
    """Inverse of encode(): unpickle a value fetched from SQLite."""
    raw = bytes(obj)
    return loads(raw)
|
|
|
|
|
def encode_key(key):
    """Pickle *key* and wrap it in base64 so it can be stored as SQLite TEXT."""
    pickled = dumps(key, protocol=PICKLE_PROTOCOL)
    return b64encode(pickled).decode("ascii")
|
|
|
|
|
def decode_key(key):
    """Inverse of encode_key(): recover the original key object from TEXT."""
    raw = b64decode(key.encode("ascii"))
    return loads(raw)
|
|
|
|
|
def identity(obj):
    """Pass-through codec f(x) = x; used when keys need no transformation."""
    return obj
|
|
|
|
|
class SqliteDict(DictClass):
    """A persistent dict-like object backed by one table of an SQLite database file.

    Values are serialized via the ``encode``/``decode`` callables (pickle by
    default); keys pass through ``encode_key``/``decode_key`` (identity by
    default). All SQL is funneled through a single ``SqliteMultithread``
    worker thread, so one instance can be shared across threads.
    """

    # Accepted values for the `flag` constructor argument; see __init__ docstring.
    VALID_FLAGS = ['c', 'r', 'w', 'n']

    def __init__(self, filename=None, tablename='unnamed', flag='c',
                 autocommit=False, journal_mode="DELETE", encode=encode,
                 decode=decode, encode_key=identity, decode_key=identity,
                 timeout=5, outer_stack=True):
        """
        Initialize a thread-safe sqlite-backed dictionary. The dictionary will
        be a table `tablename` in database file `filename`. A single file (=database)
        may contain multiple tables.

        If no `filename` is given, a random file in temp will be used (and deleted
        from temp once the dict is closed/deleted).

        If you enable `autocommit`, changes will be committed after each operation
        (more inefficient but safer). Otherwise, changes are committed on `self.commit()`,
        `self.clear()` and `self.close()`.

        Set `journal_mode` to 'OFF' if you're experiencing sqlite I/O problems
        or if you need performance and don't care about crash-consistency.

        Set `outer_stack` to False to disable the output of the outer exception
        to the error logs. This may improve the efficiency of sqlitedict
        operation at the expense of a detailed exception trace.

        The `flag` parameter. Exactly one of:
          'c': default mode, open for read/write, creating the db/table if necessary.
          'w': open for r/w, but drop `tablename` contents first (start with empty table)
          'r': open as read-only
          'n': create a new database (erasing any existing tables, not just `tablename`!).

        The `encode` and `decode` parameters are used to customize how the values
        are serialized and deserialized.
        The `encode` parameter must be a function that takes a single Python
        object and returns a serialized representation.
        The `decode` function must be a function that takes the serialized
        representation produced by `encode` and returns a deserialized Python
        object.
        The default is to use pickle.

        The `timeout` defines the maximum time (in seconds) to wait for initial Thread startup.
        NOTE(review): `timeout` is accepted but never referenced anywhere in
        this implementation — confirm whether it should be wired into the
        worker-thread startup.

        Raises RuntimeError for an unrecognized `flag`, a missing parent
        directory, or when `flag='r'` and the table does not exist.
        """
        # No filename means "use a throwaway temp file", removed again on close().
        self.in_temp = filename is None
        if self.in_temp:
            fd, filename = tempfile.mkstemp(prefix='sqldict')
            os.close(fd)  # only the path is needed; sqlite opens the file itself

        if flag not in SqliteDict.VALID_FLAGS:
            raise RuntimeError("Unrecognized flag: %s" % flag)
        self.flag = flag

        if flag == 'n':
            # 'n' = start from scratch: delete the whole database file.
            if os.path.exists(filename):
                os.remove(filename)

        dirname = os.path.dirname(filename)
        if dirname:
            if not os.path.exists(dirname):
                raise RuntimeError('Error! The directory does not exist, %s' % dirname)

        self.filename = filename

        # Escape double quotes so the table name can be safely embedded in the
        # double-quoted SQL identifiers used by the statements below.
        self.tablename = tablename.replace('"', '""')

        self.autocommit = autocommit
        self.journal_mode = journal_mode
        self.encode = encode
        self.decode = decode
        self.encode_key = encode_key
        self.decode_key = decode_key
        self._outer_stack = outer_stack

        logger.debug("opening Sqlite table %r in %r" % (tablename, filename))
        self.conn = self._new_conn()
        if self.flag == 'r':
            # Read-only mode: refuse to implicitly create the table.
            if self.tablename not in SqliteDict.get_tablenames(self.filename):
                msg = 'Refusing to create a new table "%s" in read-only DB mode' % tablename
                raise RuntimeError(msg)
        else:
            MAKE_TABLE = 'CREATE TABLE IF NOT EXISTS "%s" (key TEXT PRIMARY KEY, value BLOB)' % self.tablename
            self.conn.execute(MAKE_TABLE)
            self.conn.commit()
            if flag == 'w':
                # 'w' = keep the table but start with empty contents.
                self.clear()

    def _new_conn(self):
        # Spawn a fresh worker thread that owns the actual sqlite3 connection.
        return SqliteMultithread(
            self.filename,
            autocommit=self.autocommit,
            journal_mode=self.journal_mode,
            outer_stack=self._outer_stack,
        )

    def __enter__(self):
        # Re-open the connection if this instance was previously closed,
        # so a SqliteDict can be reused as a context manager after close().
        if not hasattr(self, 'conn') or self.conn is None:
            self.conn = self._new_conn()
        return self

    def __exit__(self, *exc_info):
        self.close()

    def __str__(self):
        return "SqliteDict(%s)" % (self.filename)

    def __repr__(self):
        return str(self)

    def __len__(self):
        """Return the number of rows (= keys) currently stored in the table."""
        GET_LEN = 'SELECT COUNT(*) FROM "%s"' % self.tablename
        rows = self.conn.select_one(GET_LEN)[0]
        return rows if rows is not None else 0

    def __bool__(self):
        # MAX(ROWID) is cheaper than COUNT(*) when we only need "any rows at all?".
        GET_MAX = 'SELECT MAX(ROWID) FROM "%s"' % self.tablename
        m = self.conn.select_one(GET_MAX)[0]
        # MAX(ROWID) is None for an empty table, an integer otherwise.
        return True if m is not None else False

    def iterkeys(self):
        """Yield decoded keys, in insertion (rowid) order."""
        GET_KEYS = 'SELECT key FROM "%s" ORDER BY rowid' % self.tablename
        for key in self.conn.select(GET_KEYS):
            yield self.decode_key(key[0])

    def itervalues(self):
        """Yield decoded values, in insertion (rowid) order."""
        GET_VALUES = 'SELECT value FROM "%s" ORDER BY rowid' % self.tablename
        for value in self.conn.select(GET_VALUES):
            yield self.decode(value[0])

    def iteritems(self):
        """Yield decoded (key, value) pairs, in insertion (rowid) order."""
        GET_ITEMS = 'SELECT key, value FROM "%s" ORDER BY rowid' % self.tablename
        for key, value in self.conn.select(GET_ITEMS):
            yield self.decode_key(key), self.decode(value)

    def keys(self):
        # Returns an iterator (not a list), mirroring Python 3 dict views.
        return self.iterkeys()

    def values(self):
        # Returns an iterator (not a list), mirroring Python 3 dict views.
        return self.itervalues()

    def items(self):
        # Returns an iterator (not a list), mirroring Python 3 dict views.
        return self.iteritems()

    def __contains__(self, key):
        HAS_ITEM = 'SELECT 1 FROM "%s" WHERE key = ?' % self.tablename
        return self.conn.select_one(HAS_ITEM, (self.encode_key(key),)) is not None

    def __getitem__(self, key):
        """Return the decoded value for `key`; raise KeyError when absent."""
        GET_ITEM = 'SELECT value FROM "%s" WHERE key = ?' % self.tablename
        item = self.conn.select_one(GET_ITEM, (self.encode_key(key),))
        if item is None:
            raise KeyError(key)
        return self.decode(item[0])

    def __setitem__(self, key, value):
        """Insert or overwrite `key` with the encoded `value`."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to write to read-only SqliteDict')

        # REPLACE = insert-or-overwrite on the key's PRIMARY KEY constraint.
        ADD_ITEM = 'REPLACE INTO "%s" (key, value) VALUES (?,?)' % self.tablename
        self.conn.execute(ADD_ITEM, (self.encode_key(key), self.encode(value)))
        if self.autocommit:
            self.commit()

    def __delitem__(self, key):
        """Delete `key`; raise KeyError when absent."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to delete from read-only SqliteDict')

        if key not in self:
            raise KeyError(key)
        DEL_ITEM = 'DELETE FROM "%s" WHERE key = ?' % self.tablename
        self.conn.execute(DEL_ITEM, (self.encode_key(key),))
        if self.autocommit:
            self.commit()

    def update(self, items=(), **kwds):
        """dict.update() equivalent: bulk insert/overwrite from a mapping,
        an iterable of pairs, and/or keyword arguments."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to update read-only SqliteDict')

        # Accept both a mapping (use its .items()) and an iterable of pairs.
        try:
            items = items.items()
        except AttributeError:
            pass
        items = [(self.encode_key(k), self.encode(v)) for k, v in items]

        UPDATE_ITEMS = 'REPLACE INTO "%s" (key, value) VALUES (?, ?)' % self.tablename
        self.conn.executemany(UPDATE_ITEMS, items)
        if kwds:
            # Recurse once so keyword arguments go through the same encoding path.
            self.update(kwds)
        if self.autocommit:
            self.commit()

    def __iter__(self):
        return self.iterkeys()

    def clear(self):
        """Remove all rows from the table (and commit, regardless of autocommit)."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to clear read-only SqliteDict')

        CLEAR_ALL = 'DELETE FROM "%s";' % self.tablename
        # Flush pending writes first so the delete sees a consistent state.
        self.conn.commit()
        self.conn.execute(CLEAR_ALL)
        self.conn.commit()

    @staticmethod
    def get_tablenames(filename):
        """get the names of the tables in an sqlite db as a list"""
        if not os.path.isfile(filename):
            raise IOError('file %s does not exist' % (filename))
        GET_TABLENAMES = 'SELECT name FROM sqlite_master WHERE type="table"'
        # NOTE: sqlite3's connection context manager commits/rolls back a
        # transaction but does not close the connection.
        with sqlite3.connect(filename) as conn:
            cursor = conn.execute(GET_TABLENAMES)
            res = cursor.fetchall()

        return [name[0] for name in res]

    def commit(self, blocking=True):
        """
        Persist all data to disk.

        When `blocking` is False, the commit command is queued, but the data is
        not guaranteed persisted (default implication when autocommit=True).
        """
        if self.conn is not None:
            self.conn.commit(blocking)
    # Backwards-compatible alias.
    sync = commit

    def close(self, do_log=True, force=False):
        """Shut down the worker thread and release the connection.

        With `force=True` (used from __del__), skip the final blocking commit
        and close without waiting for pending requests to be acknowledged.
        """
        if do_log:
            logger.debug("closing %s" % self)
        if hasattr(self, 'conn') and self.conn is not None:
            if self.conn.autocommit and not force:
                # Autocommit normally fires non-blocking commits; issue one
                # final blocking commit so the last writes are persisted.
                self.conn.commit(blocking=True)
            self.conn.close(force=force)
            self.conn = None
        if self.in_temp:
            # Best-effort removal of the auto-created temp file.
            try:
                os.remove(self.filename)
            except Exception:
                pass

    def terminate(self):
        """Delete the underlying database file. Use with care."""
        if self.flag == 'r':
            raise RuntimeError('Refusing to terminate read-only SqliteDict')

        self.close()

        # In-memory databases have no file to delete.
        if self.filename == ':memory:':
            return

        logger.info("deleting %s" % self.filename)
        try:
            if os.path.isfile(self.filename):
                os.remove(self.filename)
        except (OSError, IOError):
            logger.exception("failed to delete %s" % (self.filename))

    def __del__(self):
        # Best-effort cleanup during garbage collection / interpreter shutdown;
        # force=True avoids blocking on a worker thread that may be gone.
        try:
            self.close(do_log=False, force=True)
        except Exception:
            # Modules and globals may already be torn down at this point, so
            # any failure here is deliberately swallowed.
            pass
|
|
|
|
|
class SqliteMultithread(threading.Thread):
    """
    Wrap sqlite connection in a way that allows concurrent requests from multiple threads.

    This is done by internally queueing the requests and processing them sequentially
    in a separate thread (in the same order they arrived).

    Requests are (sql, params, weakref-to-response-queue, caller-stack) tuples;
    responses flow back through per-request queues held only by weak reference,
    so an abandoned request cannot leak its response queue.
    """
    def __init__(self, filename, autocommit, journal_mode, outer_stack=True):
        super(SqliteMultithread, self).__init__()
        self.filename = filename
        self.autocommit = autocommit
        self.journal_mode = journal_mode

        # Incoming request queue, consumed sequentially by run().
        self.reqs = Queue()
        # Daemon thread: don't block interpreter exit on this worker.
        self.daemon = True
        self._outer_stack = outer_stack
        self.log = logging.getLogger('sqlitedict.SqliteMultithread')

        # The lock is acquired here and released by run() once it has
        # attempted to connect, so it doubles as a startup latch: the first
        # check_raise_error() call blocks until the worker is initialized.
        # It also serializes access to self.exception between threads.
        self._lock = threading.Lock()
        self._lock.acquire()
        # Last sys.exc_info() captured in the worker, to be re-raised in the
        # calling thread by check_raise_error().
        self.exception = None

        self.start()

    def _connect(self):
        """Connect to the underlying database.

        Raises an exception on failure. Returns the connection and cursor on success.
        """
        try:
            if self.autocommit:
                # isolation_level=None puts sqlite3 in autocommit mode.
                conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False)
            else:
                conn = sqlite3.connect(self.filename, check_same_thread=False)
        except Exception:
            self.log.exception("Failed to initialize connection for filename: %s" % self.filename)
            self.exception = sys.exc_info()
            raise

        try:
            conn.execute('PRAGMA journal_mode = %s' % self.journal_mode)
            conn.text_factory = str
            cursor = conn.cursor()
            conn.commit()
            cursor.execute('PRAGMA synchronous=OFF')
        except Exception:
            self.log.exception("Failed to execute PRAGMA statements.")
            self.exception = sys.exc_info()
            raise

        return conn, cursor

    def run(self):
        # Worker main loop: the connection is created inside this thread and
        # requests are served one at a time, in arrival order, until a
        # --close-- request is received.
        try:
            conn, cursor = self._connect()
        finally:
            # Release the startup latch even if connecting failed, so callers
            # blocked in check_raise_error() can observe self.exception.
            self._lock.release()

        res_ref = None
        while True:
            # req: SQL string or a control token (--close--/--commit--);
            # arg: parameter tuple; res_ref: weakref to the response queue
            # (or None); outer_stack: caller-side stack for diagnostics.
            req, arg, res_ref, outer_stack = self.reqs.get()

            if req == _REQUEST_CLOSE:
                assert res_ref, ('--close-- without return queue', res_ref)
                break
            elif req == _REQUEST_COMMIT:
                conn.commit()
                _put(res_ref, _RESPONSE_NO_MORE)
            else:
                try:
                    cursor.execute(req, arg)
                except Exception:
                    # Store the exception so the calling thread re-raises it
                    # on its next call (see check_raise_error).
                    with self._lock:
                        self.exception = (e_type, e_value, e_tb) = sys.exc_info()

                    inner_stack = traceback.extract_stack()

                    # Log the worker-side (inner) stack and the exception...
                    self.log.error('Inner exception:')
                    for item in traceback.format_list(inner_stack):
                        self.log.error(item)
                    self.log.error('')  # spacer
                    for item in traceback.format_exception_only(e_type, e_value):
                        self.log.error(item)

                    self.log.error('')  # spacer

                    # ...and, when captured, the caller-side (outer) stack.
                    if self._outer_stack:
                        self.log.error('Outer stack:')
                        for item in traceback.format_list(outer_stack):
                            self.log.error(item)
                        self.log.error('Exception will be re-raised at next call.')
                    else:
                        self.log.error(
                            'Unable to show the outer stack. Pass '
                            'outer_stack=True when initializing the '
                            'SqliteDict instance to show the outer stack.'
                        )

                if res_ref:
                    # Stream result rows back to the requester; stop early if
                    # its response queue has been garbage-collected.
                    for rec in cursor:
                        if _put(res_ref, rec) == _PUT_REFERENT_DESTROYED:
                            break

                    # Sentinel marking the end of this result set.
                    _put(res_ref, _RESPONSE_NO_MORE)

                if self.autocommit:
                    conn.commit()

        # Loop exited on --close--: acknowledge and shut down the connection.
        self.log.debug('received: %s, send: --no more--', req)
        conn.close()

        _put(res_ref, _RESPONSE_NO_MORE)

    def check_raise_error(self):
        """
        Check for and raise exception for any previous sqlite query.

        For the `execute*` family of method calls, such calls are non-blocking and any
        exception raised in the thread cannot be handled by the calling Thread (usually
        MainThread). This method is called on `close`, and prior to any subsequent
        calls to the `execute*` methods to check for and raise an exception in a
        previous call to the MainThread.
        """
        with self._lock:
            if self.exception:
                e_type, e_value, e_tb = self.exception

                # Clear the stored exception so that, if the caller decides to
                # ignore it and keep using this object, later errors are not masked.
                self.exception = None

                self.log.error('An exception occurred from a previous statement, view '
                               'the logging namespace "sqlitedict" for outer stack.')

                # Re-raise in the calling thread with the worker-side traceback attached.
                reraise(e_type, e_value, e_tb)

    def execute(self, req, arg=None, res=None):
        """
        `execute` calls are non-blocking: just queue up the request and return immediately.

        :param req: The request (an SQL command)
        :param arg: Arguments to the SQL command
        :param res: A queue in which to place responses as they become available
        """
        self.check_raise_error()
        stack = None

        if self._outer_stack:
            # Capture the caller's stack (excluding this frame) so a failure
            # inside the worker thread can be reported with caller context.
            stack = traceback.extract_stack()[:-1]

        # Only a weak reference to the response queue is enqueued, so an
        # abandoned select() does not keep its queue alive in the worker.
        res_ref = None
        if res:
            res_ref = weakref.ref(res)

        self.reqs.put((req, arg or tuple(), res_ref, stack))

    def executemany(self, req, items):
        """Queue `req` once per item; then surface any pending worker exception."""
        for item in items:
            self.execute(req, item)
        self.check_raise_error()

    def select(self, req, arg=None):
        """
        Unlike sqlite's native select, this select doesn't handle iteration efficiently.

        The result of `select` starts filling up with values as soon as the
        request is dequeued, and although you can iterate over the result normally
        (`for res in self.select(): ...`), the entire result will be in memory.
        """
        res = Queue()  # the worker streams result rows into this queue
        self.execute(req, arg, res)
        while True:
            rec = res.get()
            # Surface any exception the worker hit while executing this request.
            self.check_raise_error()
            if rec == _RESPONSE_NO_MORE:
                break
            yield rec

    def select_one(self, req, arg=None):
        """Return only the first row of the SELECT, or None if there are no matching rows."""
        try:
            return next(iter(self.select(req, arg)))
        except StopIteration:
            return None

    def commit(self, blocking=True):
        """Ask the worker to commit; with blocking=True, wait for completion."""
        if blocking:
            # Abuse select_one to wait for the worker's acknowledgement: this
            # both persists the data and re-raises any pending exception.
            self.select_one(_REQUEST_COMMIT)
        else:
            # Fire and forget.
            self.execute(_REQUEST_COMMIT)

    def close(self, force=False):
        """Stop the worker thread; with force=True, do not wait for it."""
        if force:
            # The worker may already be gone (e.g. during interpreter
            # shutdown), in which case waiting for an acknowledgement could
            # hang forever. Enqueue the close request with a throwaway
            # response queue and return immediately.
            self.reqs.put((_REQUEST_CLOSE, None, weakref.ref(Queue()), None))
        else:
            # Wait until the worker acknowledges the close request...
            self.select_one(_REQUEST_CLOSE)
            # ...and then for the thread itself to terminate.
            self.join()
|
|
|
|
|
|
|
|
|
|
|
# Allow `python sqlitedict.py` to print the library version.
if __name__ == '__main__':
    print(__version__)
|
|