diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index e4f2201c92..18252a2958 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -189,7 +189,9 @@ class ReplicationDataHandler:
                 if row.deleted:
                     self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                 else:
-                    await self.start_pusher(row.user_id, row.app_id, row.pushkey)
+                    await self.process_pusher_change(
+                        row.user_id, row.app_id, row.pushkey
+                    )
         elif stream_name == EventsStream.NAME:
             # We shouldn't get multiple rows per token for events stream, so
             # we don't need to optimise this for multiple rows.
@@ -208,15 +210,16 @@ class ReplicationDataHandler:
 
                 max_token = self.store.get_room_max_token()
                 event_pos = PersistedEventPosition(instance_name, token)
-                await self.notifier.on_new_room_event_args(
-                    event_pos=event_pos,
-                    max_room_stream_token=max_token,
-                    extra_users=extra_users,
-                    room_id=row.data.room_id,
-                    event_id=row.data.event_id,
-                    event_type=row.data.type,
-                    state_key=row.data.state_key,
-                    membership=row.data.membership,
+                event_entry = self.notifier.create_pending_room_event_entry(
+                    event_pos,
+                    extra_users,
+                    row.data.room_id,
+                    row.data.type,
+                    row.data.state_key,
+                    row.data.membership,
+                )
+                await self.notifier.notify_new_room_events(
+                    [(event_entry, row.data.event_id)], max_token
                 )
 
                 # If this event is a join, make a note of it so we have an accurate
@@ -334,13 +337,15 @@ class ReplicationDataHandler:
logger.info("Stopping pusher %r / %r", user_id, key)
pusher.on_stop()
- async def start_pusher(self, user_id: str, app_id: str, pushkey: str) -> None:
+ async def process_pusher_change(
+ self, user_id: str, app_id: str, pushkey: str
+ ) -> None:
if not self._notify_pushers:
return
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
- await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
+ await self._pusher_pool.process_pusher_change_by_id(app_id, pushkey, user_id)
class FederationSenderHandler:
@@ -423,7 +428,8 @@ class FederationSenderHandler:
                 receipt.receipt_type,
                 receipt.user_id,
                 [receipt.event_id],
-                receipt.data,
+                thread_id=receipt.thread_id,
+                data=receipt.data,
             )
             await self.federation_sender.send_read_receipt(receipt_info)
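
The client.py hunks above replace the single `on_new_room_event_args` call with a two-step notifier API: build a pending entry for the event, then hand a batch of entries to `notify_new_room_events` together with the maximum stream token. Below is a minimal sketch of batching several replicated events into one call; the notifier method names and argument order are taken from the hunk, while the helper function, its parameters, and the `rows` iterable are hypothetical.

from typing import Iterable, List, Tuple

from synapse.types import PersistedEventPosition


async def notify_replicated_events(
    notifier, store, instance_name: str, rows: Iterable[Tuple[int, "EventsStreamRow"]]
) -> None:
    # Hypothetical helper: build one pending entry per replicated event and
    # wake the notifier once for the whole batch (the hunk above passes a
    # one-element list for a single event).
    entries: List[Tuple[object, str]] = []
    for token, row in rows:
        event_pos = PersistedEventPosition(instance_name, token)
        entry = notifier.create_pending_room_event_entry(
            event_pos,
            [],  # extra_users, left empty in this sketch
            row.data.room_id,
            row.data.type,
            row.data.state_key,
            row.data.membership,
        )
        entries.append((entry, row.data.event_id))

    await notifier.notify_new_room_events(entries, store.get_room_max_token())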
diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py
index e1cbfa50eb..0f166d16aa 100644
--- a/synapse/replication/tcp/handler.py
+++ b/synapse/replication/tcp/handler.py
@@ -35,7 +35,6 @@ from twisted.internet.protocol import ReconnectingClientFactory
 from synapse.metrics import LaterGauge
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.replication.tcp.client import DirectTcpReplicationClientFactory
 from synapse.replication.tcp.commands import (
     ClearUserSyncsCommand,
     Command,
@@ -332,46 +331,31 @@ class ReplicationCommandHandler:
 
     def start_replication(self, hs: "HomeServer") -> None:
         """Helper method to start replication."""
-        if hs.config.redis.redis_enabled:
-            from synapse.replication.tcp.redis import (
-                RedisDirectTcpReplicationClientFactory,
-            )
+        from synapse.replication.tcp.redis import RedisDirectTcpReplicationClientFactory
 
-            # First let's ensure that we have a ReplicationStreamer started.
-            hs.get_replication_streamer()
+        # First let's ensure that we have a ReplicationStreamer started.
+        hs.get_replication_streamer()
 
-            # We need two connections to redis, one for the subscription stream and
-            # one to send commands to (as you can't send further redis commands to a
-            # connection after SUBSCRIBE is called).
+        # We need two connections to redis, one for the subscription stream and
+        # one to send commands to (as you can't send further redis commands to a
+        # connection after SUBSCRIBE is called).
 
-            # First create the connection for sending commands.
-            outbound_redis_connection = hs.get_outbound_redis_connection()
+        # First create the connection for sending commands.
+        outbound_redis_connection = hs.get_outbound_redis_connection()
 
-            # Now create the factory/connection for the subscription stream.
-            self._factory = RedisDirectTcpReplicationClientFactory(
-                hs,
-                outbound_redis_connection,
-                channel_names=self._channels_to_subscribe_to,
-            )
-            hs.get_reactor().connectTCP(
-                hs.config.redis.redis_host,
-                hs.config.redis.redis_port,
-                self._factory,
-                timeout=30,
-                bindAddress=None,
-            )
-        else:
-            client_name = hs.get_instance_name()
-            self._factory = DirectTcpReplicationClientFactory(hs, client_name, self)
-            host = hs.config.worker.worker_replication_host
-            port = hs.config.worker.worker_replication_port
-            hs.get_reactor().connectTCP(
-                host,
-                port,
-                self._factory,
-                timeout=30,
-                bindAddress=None,
-            )
+        # Now create the factory/connection for the subscription stream.
+        self._factory = RedisDirectTcpReplicationClientFactory(
+            hs,
+            outbound_redis_connection,
+            channel_names=self._channels_to_subscribe_to,
+        )
+        hs.get_reactor().connectTCP(
+            hs.config.redis.redis_host,
+            hs.config.redis.redis_port,
+            self._factory,
+            timeout=30,
+            bindAddress=None,
+        )
 
     def get_streams(self) -> Dict[str, Stream]:
         """Get a map from stream name to all streams."""
diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py
index 7763ffb2d0..56a5c21910 100644
--- a/synapse/replication/tcp/protocol.py
+++ b/synapse/replication/tcp/protocol.py
@@ -245,7 +245,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
             self._parse_and_dispatch_line(line)
 
     def _parse_and_dispatch_line(self, line: bytes) -> None:
-        if line.strip() == "":
+        if line.strip() == b"":
             # Ignore blank lines
             return
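
The one-character fix in protocol.py matters because `line` is bytes: in Python 3 a bytes value never compares equal to a str, so the old check could never match and blank lines were not skipped. A quick illustration:

line = b"   \r\n"
print(line.strip() == "")   # False: bytes and str never compare equal
print(line.strip() == b"")  # True: the corrected check catches blank lines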
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 398bebeaa6..e01155ad59 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -361,6 +361,7 @@ class ReceiptsStream(Stream):
         receipt_type: str
         user_id: str
         event_id: str
+        thread_id: Optional[str]
         data: dict
 
     NAME = "receipts"
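
With the new field, a receipts stream row carries an optional thread ID alongside the existing attributes. A standalone sketch of the row shape, using a local dataclass that mirrors the fields listed above (plus a room_id field assumed to sit just above the hunk's context); the real stream row class is not constructed here:

from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class ReceiptsRowSketch:
    room_id: str            # assumed field, not shown in the hunk's context
    receipt_type: str
    user_id: str
    event_id: str
    thread_id: Optional[str]  # None for an unthreaded receipt
    data: dict


row = ReceiptsRowSketch(
    room_id="!room:example.org",
    receipt_type="m.read",
    user_id="@alice:example.org",
    event_id="$someevent",
    thread_id=None,
    data={"ts": 1234567890},
)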