""" | |
SQLAlchemy, PostgreSQL (psycopg2), and autocommit | |
See blog post: http://oddbird.net/2014/06/14/sqlalchemy-postgres-autocommit/ | |
""" | |
from contextlib import contextmanager | |
from sqlalchemy import create_engine, event | |
from sqlalchemy.orm import sessionmaker, Session as BaseSession | |
class Session(BaseSession):
    """Session subclass providing an ``atomic`` transaction context manager.

    Tracks whether an ``atomic`` block is already active so that nested
    calls open savepoints instead of new top-level transactions.
    """

    def __init__(self, *a, **kw):
        super(Session, self).__init__(*a, **kw)
        # True while the outermost atomic() block is active.
        self._in_atomic = False

    @contextmanager
    def atomic(self):
        """Transaction context manager.

        Will commit the transaction on successful completion of the block,
        or roll it back on error.

        Supports nested usage (via savepoints).
        """
        nested = self._in_atomic
        self.begin(nested=nested)
        self._in_atomic = True
        try:
            yield
        except BaseException:
            # Catch *everything* (including KeyboardInterrupt and
            # GeneratorExit) so the transaction is never left dangling;
            # the exception is always re-raised to the caller.
            self.rollback()
            raise
        else:
            self.commit()
        finally:
            if not nested:
                # Only the outermost block clears the flag; inner
                # (savepoint) blocks leave it set for their enclosing block.
                self._in_atomic = False
class Database(object):
    """Engine + sessionmaker pair that emulate autocommit-by-default.

    The engine runs at isolation level AUTOCOMMIT; event listeners turn
    the DBAPI connection's autocommit flag off for the duration of each
    top-level transaction and restore it when the transaction ends.
    """

    def __init__(self, db_uri):
        self.engine = create_engine(db_uri, isolation_level="AUTOCOMMIT")
        self.Session = sessionmaker(
            bind=self.engine, class_=Session, autocommit=True)

        # Maps each transaction object to the set of DBAPI connections
        # whose autocommit flag it switched off, so it can be restored
        # when that transaction ends.
        disabled_conns = {}

        @event.listens_for(self.Session, 'after_begin')
        def receive_after_begin(session, transaction, connection):
            """When a (non-nested) transaction begins, turn autocommit off."""
            raw_conn = connection.connection.connection
            if transaction.nested:
                # A savepoint runs inside an outer transaction that has
                # already disabled autocommit; nothing to do here.
                assert not raw_conn.autocommit
                return
            assert raw_conn.autocommit
            raw_conn.autocommit = False
            disabled_conns.setdefault(transaction, set()).add(raw_conn)

        @event.listens_for(self.Session, 'after_transaction_end')
        def receive_after_transaction_end(session, transaction):
            """Restore autocommit anywhere this transaction turned it off."""
            raw_conns = disabled_conns.pop(transaction, None)
            if raw_conns is not None:
                for raw_conn in raw_conns:
                    assert not raw_conn.autocommit
                    raw_conn.autocommit = True
This comment has been minimized.
This comment has been minimized.
Hey, this is very interesting, as I'm dealing with a related problem. I'm executing concurrent inserts from a multi-process environment and I get IntegrityErrors because of unique key violations. I thought of using savepoints via atomic transactions, so I'm trying to understand your solution, but I also found this in the docs:
How is this different from your solution? Doesn't it work on PostgreSQL? Thanks anyway — I'll keep studying.
This comment has been minimized.
This comment has been minimized.
@ducu |
This comment has been minimized.
This comment has been minimized.
@carljm Thanks so much for the blog post that accompanied this — very helpful! Did anything ever come of this? Is this still your approach to transaction management, or is there a better solution now?
This comment has been minimized.
This comment has been minimized.
Nice research. But isn't it some sort of hacking SQLAlchemy 'by-design-behaviour'? |
This comment has been minimized.
This comment has been minimized.
This comment has been minimized.
This comment has been minimized.
Just saw the latest comment notification and realized I missed some earlier comments. Thanks for the kind words about the blog post! A couple responses:
In my current work I'm no longer using either Postgres or SQLAlchemy. The project I did this on is still running in prod and there haven't been issues caused by this approach that I know of. But I only ever did it on one project.
Yup. I was curious to see if it could be done, and as I mentioned in the blog post, I'm not super happy with how hacky the solution feels, and I wish there were a well-supported way to get this behavior in SQLAlchemy.
This comment has been minimized.
I've been wrestling the past couple weeks with the subtle nuances of Postgres and SQLAlchemy's transaction management. Thanks for the great blog post as well as this code. This helps me out a lot!