diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 748548bbe2..a12c18f4df 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -53,6 +53,7 @@ class FederationRemoteSendQueue(object):
self.server_name = hs.hostname
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
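+ # Used to check whether a user ID belongs to this server: only
+ # presence for local users is federated out from here.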
+ self.is_mine_id = hs.is_mine_id
self.presence_map = {}
self.presence_changed = sorteddict()
@@ -120,7 +121,9 @@ class FederationRemoteSendQueue(object):
del self.presence_changed[key]
user_ids = set(
- user_id for uids in self.presence_changed.values() for _, user_id in uids
+ user_id
+ for uids in self.presence_changed.itervalues()
+ for user_id in uids
)
to_del = [
@@ -187,18 +190,14 @@ class FederationRemoteSendQueue(object):
self.notifier.on_new_replication_data()
- def send_presence(self, destination, states):
+ def send_presence(self, states):
"""As per TransactionQueue"""
pos = self._next_pos()
- self.presence_map.update({
- state.user_id: state
- for state in states
- })
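+ # Presence for remote users originates from their own homeserver,
+ # so we only queue up states for our local users here.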
+ local_states = [s for s in states if self.is_mine_id(s.user_id)]
- self.presence_changed[pos] = [
- (destination, state.user_id) for state in states
- ]
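+ # presence_map holds the latest state for each user; presence_changed
+ # records which users changed at this stream position.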
+ self.presence_map.update({state.user_id: state for state in local_states})
+ self.presence_changed[pos] = [state.user_id for state in local_states]
self.notifier.on_new_replication_data()
@@ -251,15 +250,14 @@ class FederationRemoteSendQueue(object):
keys = self.presence_changed.keys()
i = keys.bisect_right(from_token)
j = keys.bisect_right(to_token) + 1
- dest_user_ids = set(
- (pos, dest_user_id)
+ dest_user_ids = [
+ (pos, user_id)
for pos in keys[i:j]
- for dest_user_id in self.presence_changed[pos]
- )
+ for user_id in self.presence_changed[pos]
+ ]
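+ # Each changed user becomes one row on the replication stream; the
+ # destinations are now worked out by the master when it processes the
+ # rows, rather than being recorded here.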
- for (key, (dest, user_id)) in dest_user_ids:
+ for (key, user_id) in dest_user_ids:
rows.append((key, PresenceRow(
- destination=dest,
state=self.presence_map[user_id],
)))
@@ -354,7 +352,6 @@ class BaseFederationRow(object):
class PresenceRow(BaseFederationRow, namedtuple("PresenceRow", (
- "destination", # str
"state", # UserPresenceState
))):
TypeId = "p"
@@ -362,18 +359,14 @@ class PresenceRow(BaseFederationRow, namedtuple("PresenceRow", (
@staticmethod
def from_data(data):
return PresenceRow(
- destination=data["destination"],
- state=UserPresenceState.from_dict(data["state"])
+ state=UserPresenceState.from_dict(data)
)
def to_data(self):
- return {
- "destination": self.destination,
- "state": self.state.as_dict()
- }
+ return self.state.as_dict()
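+ # Illustrative example (exact fields depend on
+ # UserPresenceState.as_dict): a presence row on the stream is now just
+ # the state dict itself, e.g.
+ # {"user_id": "@alice:example.com", "state": "online", ...}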
def add_to_buffer(self, buff):
- buff.presence.setdefault(self.destination, []).append(self.state)
+ buff.presence.append(self.state)
class KeyedEduRow(BaseFederationRow, namedtuple("KeyedEduRow", (
@@ -469,7 +462,7 @@ TypeToRow = {
ParsedFederationStreamData = namedtuple("ParsedFederationStreamData", (
- "presence", # dict of destination -> [UserPresenceState]
+ "presence", # list(UserPresenceState)
"keyed_edus", # dict of destination -> { key -> Edu }
"edus", # dict of destination -> [Edu]
"failures", # dict of destination -> [failures]
@@ -491,7 +484,7 @@ def process_rows_for_federation(transaction_queue, rows):
# them into the appropriate collection and then send them off.
buff = ParsedFederationStreamData(
- presence={},
+ presence=[],
keyed_edus={},
edus={},
failures={},
@@ -508,8 +501,8 @@ def process_rows_for_federation(transaction_queue, rows):
parsed_row = RowType.from_data(row.data)
parsed_row.add_to_buffer(buff)
- for destination, states in buff.presence.iteritems():
- transaction_queue.send_presence(destination, states)
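+ # All buffered presence states are now sent in a single call; the
+ # transaction queue works out the destinations itself.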
+ if buff.presence:
+ transaction_queue.send_presence(buff.presence)
for destination, edu_map in buff.keyed_edus.iteritems():
for key, edu in edu_map.items():
diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py
index c27ce7c5f3..fd9e1fa01c 100644
--- a/synapse/federation/transaction_queue.py
+++ b/synapse/federation/transaction_queue.py
@@ -21,7 +21,7 @@ from .units import Transaction, Edu
from synapse.api.errors import HttpResponseException
from synapse.util.async import run_on_reactor
-from synapse.util.logcontext import preserve_context_over_fn
+from synapse.util.logcontext import preserve_context_over_fn, preserve_fn
from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
from synapse.util.metrics import measure_func
from synapse.types import get_domain_from_id
@@ -78,6 +78,7 @@ class TransactionQueue(object):
self.pending_edus_by_dest = edus = {}
# Presence needs to be separate as we send single aggregate EDUs
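+ # Map of user_id -> UserPresenceState of pending presence that still
+ # needs its destinations calculated.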
+ self.pending_presence = {}
self.pending_presence_by_dest = presence = {}
self.pending_edus_keyed_by_dest = edus_keyed = {}
@@ -113,6 +114,8 @@ class TransactionQueue(object):
self._is_processing = False
self._last_poked_id = -1
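+ # Guard so that only one caller at a time drains self.pending_presence;
+ # see send_presence below.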
+ self._processing_pending_presence = False
+
def can_send_to(self, destination):
"""Can we send messages to the given server?
@@ -224,17 +227,95 @@ class TransactionQueue(object):
self._attempt_new_transaction, destination
)
- def send_presence(self, destination, states):
- if not self.can_send_to(destination):
+ @preserve_fn
+ @defer.inlineCallbacks
+ def send_presence(self, states):
+ """Send the new presence states to the appropriate destinations.
+
+ Args:
+ states (list(UserPresenceState))
+ """
+
+ # First we queue up the new presence by user ID, so multiple presence
+ # updates in quick succession are correctly handled
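+ # (later states for a user simply overwrite earlier ones in the dict)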
+ self.pending_presence.update({state.user_id: state for state in states})
+
+ # We then handle the new pending presence in batches, first figuring
+ # out the destinations we need to send each state to, and then poking
+ # those destinations to attempt new transactions. We linearize this so
+ # that we don't accidentally mess up the ordering and send presence
+ # updates out of order.
+ if self._processing_pending_presence:
return
- self.pending_presence_by_dest.setdefault(destination, {}).update({
- state.user_id: state for state in states
- })
+ self._processing_pending_presence = True
+ try:
+ while True:
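+ # Take the current batch and reset the map: anything queued while
+ # we're processing will be picked up on the next iteration.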
+ states = self.pending_presence
+ self.pending_presence = {}
- preserve_context_over_fn(
- self._attempt_new_transaction, destination
- )
+ if not states:
+ break
+
+ yield self._process_presence_inner(states)
+ finally:
+ self._processing_pending_presence = False
+
+ @measure_func("txnqueue._process_presence")
+ @defer.inlineCallbacks
+ def _process_presence_inner(self, states):
+ """Given a list of states populate self.pending_presence_by_dest and
+ poke to send a new transaction to each destination
+
+ Args:
+ states (dict[str, UserPresenceState]): pending presence, keyed by user ID
+ """
+ # First we look up the rooms each user is in (as well as any explicit
+ # subscriptions), then for each distinct room we look up the remote
+ # hosts in those rooms.
+ room_ids_to_states = {}
+ users_to_states = {}
+ for state in states.itervalues():
+ room_ids = yield self.store.get_rooms_for_user(state.user_id)
+ for room_id in room_ids:
+ room_ids_to_states.setdefault(room_id, []).append(state)
+
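+ # Users who have accepted this user's presence-list invite also
+ # need to see the update.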
+ plist = yield self.store.get_presence_list_observers_accepted(
+ state.user_id,
+ )
+ for u in plist:
+ users_to_states.setdefault(u, []).append(state)
+
+ hosts_and_states = []
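+ # For each room with local changes, every host in the room needs the
+ # new states (can_send_to below filters out any host we shouldn't
+ # send to, including ourselves).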
+ for room_id, room_states in room_ids_to_states.iteritems():
+ local_states = [s for s in room_states if self.is_mine_id(s.user_id)]
+ if not local_states:
+ continue
+
+ hosts = yield self.store.get_hosts_in_room(room_id)
+ hosts_and_states.append((hosts, local_states))
+
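+ # Presence-list observers are individual users, so route the states
+ # to each observer's homeserver.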
+ for user_id, user_states in users_to_states.iteritems():
+ local_states = [s for s in user_states if self.is_mine_id(s.user_id)]
+ if not local_states:
+ continue
+
+ host = get_domain_from_id(user_id)
+ hosts_and_states.append(([host], local_states))
+
+ # And now finally queue up new transactions
+ for destinations, states in hosts_and_states:
+ for destination in destinations:
+ if not self.can_send_to(destination):
+ continue
+
+ self.pending_presence_by_dest.setdefault(
+ destination, {}
+ ).update({
+ state.user_id: state for state in states
+ })
+
+ preserve_fn(self._attempt_new_transaction)(destination)
def send_edu(self, destination, edu_type, content, key=None):
edu = Edu(