diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index fd18619178..b3c3bf55bc 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -147,7 +147,7 @@ class TransactionStore(SQLBaseStore):
 
         result = self._destination_retry_cache.get(destination, SENTINEL)
         if result is not SENTINEL:
-            defer.returnValue(result)
+            return result
 
         result = yield self.runInteraction(
             "get_destination_retry_timings",
@@ -158,7 +158,7 @@ class TransactionStore(SQLBaseStore):
         # We don't hugely care about race conditions between getting and
         # invalidating the cache, since we time out fairly quickly anyway.
         self._destination_retry_cache[destination] = result
-        defer.returnValue(result)
+        return result
 
     def _get_destination_retry_timings(self, txn, destination):
         result = self._simple_select_one_txn(
@@ -196,6 +196,26 @@ class TransactionStore(SQLBaseStore):
     def _set_destination_retry_timings(
         self, txn, destination, retry_last_ts, retry_interval
     ):
+
+        if self.database_engine.can_native_upsert:
+            # Upsert retry time interval if retry_interval is zero (i.e. we're
+            # resetting it) or greater than the existing retry interval.
+
+            sql = """
+                INSERT INTO destinations (destination, retry_last_ts, retry_interval)
+                    VALUES (?, ?, ?)
+                ON CONFLICT (destination) DO UPDATE SET
+                    retry_last_ts = EXCLUDED.retry_last_ts,
+                    retry_interval = EXCLUDED.retry_interval
+                WHERE
+                    EXCLUDED.retry_interval = 0
+                    OR destinations.retry_interval < EXCLUDED.retry_interval
+            """
+
+            txn.execute(sql, (destination, retry_last_ts, retry_interval))
+
+            return
+
         self.database_engine.lock_table(txn, "destinations")
 
         # We need to be careful here as the data may have changed from under us
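A quick way to sanity-check the conditional upsert added above is to run the same statement against an in-memory SQLite database (the `can_native_upsert` branch in the diff is only taken on engines with native upsert support; for SQLite that means 3.24+). The sketch below is illustrative only and not part of the patch: the `CREATE TABLE` column types and the sample destination values are assumptions made for the demo, while the upsert statement mirrors the SQL in the diff.

```python
import sqlite3  # requires SQLite >= 3.24 for ON CONFLICT ... DO UPDATE ... WHERE

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE destinations (destination TEXT PRIMARY KEY, "
    "retry_last_ts BIGINT, retry_interval BIGINT)"
)

UPSERT = """
    INSERT INTO destinations (destination, retry_last_ts, retry_interval)
        VALUES (?, ?, ?)
    ON CONFLICT (destination) DO UPDATE SET
        retry_last_ts = EXCLUDED.retry_last_ts,
        retry_interval = EXCLUDED.retry_interval
    WHERE
        EXCLUDED.retry_interval = 0
        OR destinations.retry_interval < EXCLUDED.retry_interval
"""

# Initial insert, then a write with a *smaller* interval: the WHERE clause
# rejects the update, so the stored row keeps the larger 60000 interval.
conn.execute(UPSERT, ("example.com", 1000, 60000))
conn.execute(UPSERT, ("example.com", 2000, 30000))
print(conn.execute("SELECT * FROM destinations").fetchall())
# [('example.com', 1000, 60000)]

# A zero interval resets the row regardless of the stored value.
conn.execute(UPSERT, ("example.com", 3000, 0))
print(conn.execute("SELECT * FROM destinations").fetchall())
# [('example.com', 3000, 0)]
```

As the comment in the diff says, the `WHERE` clause means a concurrent writer can only ever lengthen an existing backoff, never shrink it, while a zero interval always resets the row.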