diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py
index 01f87fe423..f9c05b5ea3 100644
--- a/synapse/federation/replication.py
+++ b/synapse/federation/replication.py
@@ -723,6 +723,8 @@ class _TransactionQueue(object):
deferreds = []
for destination in destinations:
+ # XXX: why don't we specify an errback for this deferred
+ # like we do for EDUs? --matthew
deferred = defer.Deferred()
self.pending_pdus_by_dest.setdefault(destination, []).append(
(pdu, deferred, order)
@@ -738,6 +740,9 @@ class _TransactionQueue(object):
# NO inlineCallbacks
def enqueue_edu(self, edu):
destination = edu.destination
+
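+        # No point sending an EDU over federation to ourselves, so drop it.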
+ if destination == self.server_name:
+ return
deferred = defer.Deferred()
self.pending_edus_by_dest.setdefault(destination, []).append(
@@ -766,14 +771,23 @@ class _TransactionQueue(object):
)
yield deferred
-
+
@defer.inlineCallbacks
@log_function
def _attempt_new_transaction(self, destination):
+
+        retry_timings = yield self.store.get_destination_retry_timings(destination)
+        (retry_last_ts, retry_interval) = retry_timings or (0, 0)
+        if retry_last_ts + retry_interval > int(self._clock.time_msec()):
+            logger.info("TX [%s] not ready for retry yet - dropping transaction for now",
+                        destination)
+            return
+
if destination in self.pending_transactions:
+            # XXX: pending_transactions can get stuck on a never-ending request,
+            # at which point pending_pdus_by_dest just keeps growing.
+            # We need application-layer timeouts of some flavour on these requests.
return
- # list of (pending_pdu, deferred, order)
+ # list of (pending_pdu, deferred, order)
pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
pending_edus = self.pending_edus_by_dest.pop(destination, [])
pending_failures = self.pending_failures_by_dest.pop(destination, [])
@@ -781,7 +795,8 @@ class _TransactionQueue(object):
if not pending_pdus and not pending_edus and not pending_failures:
return
- logger.debug("TX [%s] Attempting new transaction", destination)
+ logger.debug("TX [%s] Attempting new transaction (pdus: %d, edus: %d, failures: %d)",
+ destination, len(pending_pdus), len(pending_edus), len(pending_failures))
# Sort based on the order field
pending_pdus.sort(key=lambda t: t[2])
@@ -814,7 +829,7 @@ class _TransactionQueue(object):
yield self.transaction_actions.prepare_to_send(transaction)
logger.debug("TX [%s] Persisted transaction", destination)
- logger.debug("TX [%s] Sending transaction...", destination)
+ logger.info("TX [%s] Sending transaction [%s]", destination, transaction.transaction_id)
# Actually send the transaction
@@ -835,6 +850,8 @@ class _TransactionQueue(object):
transaction, json_data_cb
)
+ logger.info("TX [%s] got %d response", destination, code)
+
logger.debug("TX [%s] Sent transaction", destination)
logger.debug("TX [%s] Marking as delivered...", destination)
@@ -849,6 +866,7 @@ class _TransactionQueue(object):
if code == 200:
deferred.callback(None)
else:
+                    start_retrying(self, destination, retry_interval)
deferred.errback(RuntimeError("Got status %d" % code))
# Ensures we don't continue until all callbacks on that
@@ -861,12 +879,12 @@ class _TransactionQueue(object):
logger.debug("TX [%s] Yielded to callbacks", destination)
except Exception as e:
- logger.error("TX Problem in _attempt_transaction")
-
# We capture this here as there as nothing actually listens
# for this finishing functions deferred.
- logger.exception(e)
+            logger.exception("TX [%s] Problem in _attempt_new_transaction: %s",
+                             destination, e)
+            start_retrying(self, destination, retry_interval)
+
for deferred in deferreds:
if not deferred.called:
deferred.errback(e)
@@ -877,3 +895,14 @@ class _TransactionQueue(object):
# Check to see if there is anything else to send.
self._attempt_new_transaction(destination)
+
+def start_retrying(transaction_queue, destination, retry_interval):
+    # Track that this destination is having problems and give it a chance
+    # to recover before trying it again. Intervals are in milliseconds,
+    # matching _clock.time_msec() and retry_last_ts.
+    if retry_interval:
+        retry_interval *= 2
+    else:
+        retry_interval = 2000  # first retry after 2 seconds
+    transaction_queue.store.set_destination_retry_timings(
+        destination, int(transaction_queue._clock.time_msec()), retry_interval
+    )
+
\ No newline at end of file
diff --git a/synapse/federation/transport.py b/synapse/federation/transport.py
index 8d86152085..0f11c6d491 100644
--- a/synapse/federation/transport.py
+++ b/synapse/federation/transport.py
@@ -155,7 +155,7 @@ class TransportLayer(object):
@defer.inlineCallbacks
@log_function
def send_transaction(self, transaction, json_data_callback=None):
- """ Sends the given Transaction to it's destination
+ """ Sends the given Transaction to its destination
Args:
transaction (Transaction)
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 510f07dd7b..3edc59dbab 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -89,7 +89,7 @@ class MatrixFederationHttpClient(object):
("", "", path_bytes, param_bytes, query_bytes, "",)
)
- logger.debug("Sending request to %s: %s %s",
+ logger.info("Sending request to %s: %s %s",
destination, method, url_bytes)
logger.debug(
@@ -101,7 +101,10 @@ class MatrixFederationHttpClient(object):
]
)
- retries_left = 5
+ # was 5; for now, let's only try once at the HTTP layer and then
+ # rely on transaction-layer retries for exponential backoff and
+ # getting the message through.
+ retries_left = 0
endpoint = self._getEndpoint(reactor, destination)
@@ -131,7 +134,8 @@ class MatrixFederationHttpClient(object):
e)
raise SynapseError(400, "Domain specified not found.")
- logger.exception("Got error in _create_request")
+                logger.exception("Sending request to %s failed: %s %s: %s",
+                                 destination, method, url_bytes, e)
_print_ex(e)
if retries_left:
@@ -140,15 +144,15 @@ class MatrixFederationHttpClient(object):
else:
raise
+ logger.info("Received response %d %s for %s: %s %s",
+ response.code, response.phrase, destination, method, url_bytes)
+
if 200 <= response.code < 300:
# We need to update the transactions table to say it was sent?
pass
else:
# :'(
# Update transactions table?
- logger.error(
- "Got response %d %s", response.code, response.phrase
- )
raise CodeMessageException(
response.code, response.phrase
)
diff --git a/synapse/rest/transactions.py b/synapse/rest/transactions.py
index 93c0122f30..8c41ab4edb 100644
--- a/synapse/rest/transactions.py
+++ b/synapse/rest/transactions.py
@@ -19,7 +19,7 @@ import logging
logger = logging.getLogger(__name__)
-
+# FIXME: elsewhere we use FooStore to indicate something in the storage layer...
class HttpTransactionStore(object):
def __init__(self):
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index f15e3dfe62..04ab39341d 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -67,7 +67,7 @@ SCHEMAS = [
# Remember to update this number every time an incompatible change is made to
# database schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 8
+SCHEMA_VERSION = 9
class _RollbackButIsFineException(Exception):
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 4881f03368..e72200e2f7 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -650,7 +650,7 @@ class JoinHelper(object):
to dump the results into.
Attributes:
- taples (list): List of `Table` classes
+ tables (list): List of `Table` classes
EntryType (type)
"""
diff --git a/synapse/storage/schema/delta/v9.sql b/synapse/storage/schema/delta/v9.sql
new file mode 100644
index 0000000000..ad680c64da
--- /dev/null
+++ b/synapse/storage/schema/delta/v9.sql
@@ -0,0 +1,23 @@
+/* Copyright 2014 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- To track destination health
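+-- retry_last_ts: unix timestamp (ms) of the most recent failed attempt,
+--                or 0 once the destination is considered healthy again
+-- retry_interval: how long (ms) to wait before the next attempt; doubled
+--                 after each consecutive failure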
+CREATE TABLE IF NOT EXISTS destinations(
+ destination TEXT PRIMARY KEY,
+ retry_last_ts INTEGER,
+ retry_interval INTEGER
+);
+
+PRAGMA user_version = 9;
\ No newline at end of file
diff --git a/synapse/storage/schema/transactions.sql b/synapse/storage/schema/transactions.sql
index 88e3e4e04d..de461bfa15 100644
--- a/synapse/storage/schema/transactions.sql
+++ b/synapse/storage/schema/transactions.sql
@@ -59,3 +59,9 @@ CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_tx ON transaction_id_to_pdu(tra
CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination);
CREATE INDEX IF NOT EXISTS transaction_id_to_pdu_index ON transaction_id_to_pdu(transaction_id, destination);
+-- To track destination health
+CREATE TABLE IF NOT EXISTS destinations(
+ destination TEXT PRIMARY KEY,
+ retry_last_ts INTEGER,
+ retry_interval INTEGER
+);
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py
index 00d0f48082..47b73f7458 100644
--- a/synapse/storage/transactions.py
+++ b/synapse/storage/transactions.py
@@ -114,7 +114,7 @@ class TransactionStore(SQLBaseStore):
def _prep_send_transaction(self, txn, transaction_id, destination,
origin_server_ts):
- # First we find out what the prev_txs should be.
+ # First we find out what the prev_txns should be.
# Since we know that we are only sending one transaction at a time,
# we can simply take the last one.
query = "%s ORDER BY id DESC LIMIT 1" % (
@@ -205,6 +205,71 @@ class TransactionStore(SQLBaseStore):
return ReceivedTransactionsTable.decode_results(txn.fetchall())
+ def get_destination_retry_timings(self, destination):
+ """Gets the current retry timings (if any) for a given destination.
+
+ Args:
+ destination (str)
+
+ Returns:
+ None if not retrying
+ tuple: (retry_last_ts, retry_interval)
+                retry_last_ts: time of last retry attempt in unix epoch ms
+ retry_interval: how long until next retry in ms
+ """
+ return self.runInteraction(
+ "get_destination_retry_timings",
+ self._get_destination_retry_timings, destination)
+
+    def _get_destination_retry_timings(cls, txn, destination):
+        query = DestinationsTable.select_statement("destination = ?")
+        txn.execute(query, (destination,))
+        rows = DestinationsTable.decode_results(txn.fetchall())
+        if rows and rows[0].retry_last_ts > 0:
+            return (rows[0].retry_last_ts, rows[0].retry_interval)
+        return None
+
+    def set_destination_retry_timings(self, destination, retry_last_ts,
+                                      retry_interval):
+        """Sets the current retry timings for a given destination.
+        Both timings should be zero if retrying is no longer occurring.
+
+ Args:
+ destination (str)
+ retry_last_ts (int) - time of last retry attempt in unix epoch ms
+ retry_interval (int) - how long until next retry in ms
+ """
+ return self.runInteraction(
+ "set_destination_retry_timings",
+ self._set_destination_retry_timings, destination, retry_last_ts, retry_interval)
+
+    def _set_destination_retry_timings(cls, txn, destination,
+                                       retry_last_ts, retry_interval):
+
+        query = (
+            "INSERT OR REPLACE INTO %s "
+            "(destination, retry_last_ts, retry_interval) "
+            "VALUES (?, ?, ?)"
+        ) % DestinationsTable.table_name
+
+        txn.execute(query, (destination, retry_last_ts, retry_interval))
+
+ def get_destinations_needing_retry(self):
+ """Get all destinations which are due a retry for sending a transaction.
+
+ Returns:
+ list: A list of `DestinationsTable.EntryType`
+ """
+        return self.runInteraction(
+            "get_destinations_needing_retry",
+            self._get_destinations_needing_retry,
+            int(self._clock.time_msec()),
+        )
+
+    def _get_destinations_needing_retry(cls, txn, now_ms):
+        # A destination is due a retry once retry_interval has elapsed since
+        # the last failed attempt; there is no retry_next_ts column, so
+        # compute it from retry_last_ts + retry_interval.
+        where = "retry_last_ts > 0 AND retry_last_ts + retry_interval < ?"
+        query = DestinationsTable.select_statement(where)
+        txn.execute(query, (now_ms,))
+        return DestinationsTable.decode_results(txn.fetchall())
class ReceivedTransactionsTable(Table):
table_name = "received_transactions"
@@ -247,3 +312,14 @@ class TransactionsToPduTable(Table):
]
EntryType = namedtuple("TransactionsToPduEntry", fields)
+
+class DestinationsTable(Table):
+ table_name = "destinations"
+
+ fields = [
+ "destination",
+ "retry_last_ts",
+ "retry_interval",
+ ]
+
+ EntryType = namedtuple("DestinationsEntry", fields)
\ No newline at end of file