#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2017 Vector Creations Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
"""A replication client for use by synapse workers.
"""
import logging
from typing import TYPE_CHECKING, Dict, Iterable, Optional, Set, Tuple

from sortedcontainers import SortedList

from twisted.internet import defer
from twisted.internet.defer import Deferred

from synapse.api.constants import EventTypes, Membership, ReceiptTypes
from synapse.federation import send_queue
from synapse.federation.sender import FederationSender
from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.tcp.streams import (
    AccountDataStream,
    DeviceListsStream,
    PushersStream,
    PushRulesStream,
    ReceiptsStream,
    ToDeviceStream,
    TypingStream,
    UnPartialStatedEventStream,
    UnPartialStatedRoomStream,
)
from synapse.replication.tcp.streams.events import (
    EventsStream,
    EventsStreamEventRow,
    EventsStreamRow,
)
from synapse.replication.tcp.streams.partial_state import (
    UnPartialStatedEventStreamRow,
    UnPartialStatedRoomStreamRow,
)
from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID
from synapse.util.async_helpers import Linearizer, timeout_deferred
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure

if TYPE_CHECKING:
    from synapse.server import HomeServer

logger = logging.getLogger(__name__)

# How long we allow callers to wait for replication updates before timing out.
_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5


class ReplicationDataHandler:
    """Handles incoming stream updates from replication.

    This instance notifies the data store about updates. Can be subclassed
    to handle updates in additional ways.
    """

    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastores().main
        self.notifier = hs.get_notifier()
        self._reactor = hs.get_reactor()
        self._clock = hs.get_clock()
        self._streams = hs.get_replication_streams()
        self._instance_name = hs.get_instance_name()
        self._typing_handler = hs.get_typing_handler()
        self._state_storage_controller = hs.get_storage_controllers().state

        self._notify_pushers = hs.config.worker.start_pushers
        self._pusher_pool = hs.get_pusherpool()
        self._presence_handler = hs.get_presence_handler()

        self.send_handler: Optional[FederationSenderHandler] = None
        if hs.should_send_federation():
            self.send_handler = FederationSenderHandler(hs)

        # Map from stream and instance to list of deferreds waiting for the stream to
        # arrive at a particular position. The lists are sorted by stream position.
        self._streams_to_waiters: Dict[
            Tuple[str, str], SortedList[Tuple[int, Deferred]]
        ] = {}
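        # For illustration only (hypothetical contents), the map might look
        # like:
        #
        #     {("events", "worker1"): SortedList([(1230, <Deferred>), (1250, <Deferred>)])}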

    async def on_rdata(
        self, stream_name: str, instance_name: str, token: int, rows: list
    ) -> None:
        """Called to handle a batch of replication data with a given stream token.

        By default, this just pokes the data store. Can be overridden in subclasses to
        handle more.

        Args:
            stream_name: name of the replication stream for this batch of rows
            instance_name: the instance that wrote the rows.
            token: stream token for this batch of rows
            rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row.
        """
        self.store.process_replication_rows(stream_name, instance_name, token, rows)
        # NOTE: this must be called after process_replication_rows to ensure any
        # cache invalidations are first handled before any stream ID advances.
        self.store.process_replication_position(stream_name, instance_name, token)

        if self.send_handler:
            await self.send_handler.process_replication_rows(stream_name, token, rows)

        if stream_name == TypingStream.NAME:
            self._typing_handler.process_replication_rows(token, rows)
            self.notifier.on_new_event(
                StreamKeyType.TYPING, token, rooms=[row.room_id for row in rows]
            )
        elif stream_name == PushRulesStream.NAME:
            self.notifier.on_new_event(
                StreamKeyType.PUSH_RULES, token, users=[row.user_id for row in rows]
            )
        elif stream_name == AccountDataStream.NAME:
            self.notifier.on_new_event(
                StreamKeyType.ACCOUNT_DATA, token, users=[row.user_id for row in rows]
            )
        elif stream_name == ReceiptsStream.NAME:
            new_token = self.store.get_max_receipt_stream_id()
            self.notifier.on_new_event(
                StreamKeyType.RECEIPT, new_token, rooms=[row.room_id for row in rows]
            )
            await self._pusher_pool.on_new_receipts({row.user_id for row in rows})
        elif stream_name == ToDeviceStream.NAME:
            entities = [row.entity for row in rows if row.entity.startswith("@")]
            if entities:
                self.notifier.on_new_event(
                    StreamKeyType.TO_DEVICE, token, users=entities
                )
        elif stream_name == DeviceListsStream.NAME:
            all_room_ids: Set[str] = set()
            for row in rows:
                if row.entity.startswith("@") and not row.is_signature:
                    room_ids = await self.store.get_rooms_for_user(row.entity)
                    all_room_ids.update(room_ids)

            # `all_room_ids` can be large, so let's wake up those streams in batches
            for batched_room_ids in batch_iter(all_room_ids, 100):
                self.notifier.on_new_event(
                    StreamKeyType.DEVICE_LIST, token, rooms=batched_room_ids
                )

                # Yield to reactor so that we don't block.
                await self._clock.sleep(0)
        elif stream_name == PushersStream.NAME:
            for row in rows:
                if row.deleted:
                    self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                else:
                    await self.process_pusher_change(
                        row.user_id, row.app_id, row.pushkey
                    )
        elif stream_name == EventsStream.NAME:
            # We shouldn't get multiple rows per token for events stream, so
            # we don't need to optimise this for multiple rows.
            for row in rows:
                if row.type != EventsStreamEventRow.TypeId:
                    # The row's data is an `EventsStreamCurrentStateRow`.
                    # When we recompute the current state of a room based on forward
                    # extremities (see `update_current_state`), no new events are
                    # persisted, so we must poke the replication callbacks ourselves.
                    # This functionality is used when finishing up a partial state join.
                    self.notifier.notify_replication()
                    continue
                assert isinstance(row, EventsStreamRow)
                assert isinstance(row.data, EventsStreamEventRow)

                if row.data.rejected:
                    continue

                extra_users: Tuple[UserID, ...] = ()
                if row.data.type == EventTypes.Member and row.data.state_key:
                    extra_users = (UserID.from_string(row.data.state_key),)

                max_token = self.store.get_room_max_token()
                event_pos = PersistedEventPosition(instance_name, token)
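                # `max_token` is the current maximum room stream token (the
                # notifier's upper bound), while `event_pos` records the
                # position this particular event was persisted at.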
                event_entry = self.notifier.create_pending_room_event_entry(
                    event_pos,
                    extra_users,
                    row.data.room_id,
                    row.data.type,
                    row.data.state_key,
                    row.data.membership,
                )
                await self.notifier.notify_new_room_events(
                    [(event_entry, row.data.event_id)], max_token
                )

                # If this event is a join, make a note of it so we have an accurate
                # cross-worker room rate limit.
                # TODO: Erik said we should exclude rows that came from ex_outliers
                #  here, but I don't see how we can determine that. I guess we could
                #  add a flag to row.data?
                if (
                    row.data.type == EventTypes.Member
                    and row.data.membership == Membership.JOIN
                    and not row.data.outlier
                ):
                    # TODO retrieve the previous state, and exclude join -> join transitions
                    self.notifier.notify_user_joined_room(
                        row.data.event_id, row.data.room_id
                    )

                # If this is a server ACL event, clear the cache in the storage controller.
                if row.data.type == EventTypes.ServerACL:
                    self._state_storage_controller.get_server_acl_for_room.invalidate(
                        (row.data.room_id,)
                    )
        elif stream_name == UnPartialStatedRoomStream.NAME:
            for row in rows:
                assert isinstance(row, UnPartialStatedRoomStreamRow)

                # Wake up any tasks waiting for the room to be un-partial-stated.
                self._state_storage_controller.notify_room_un_partial_stated(
                    row.room_id
                )
                await self.notifier.on_un_partial_stated_room(row.room_id, token)
        elif stream_name == UnPartialStatedEventStream.NAME:
            for row in rows:
                assert isinstance(row, UnPartialStatedEventStreamRow)

                # Wake up any tasks waiting for the event to be un-partial-stated.
                self._state_storage_controller.notify_event_un_partial_stated(
                    row.event_id
                )

        await self._presence_handler.process_replication_rows(
            stream_name, instance_name, token, rows
        )

        # Notify any waiting deferreds. The list is ordered by position so we
        # just iterate through the list until we reach a position that is
        # greater than the received row position.
        waiting_list = self._streams_to_waiters.get((stream_name, instance_name))
        if not waiting_list:
            return

        # Index of the first item with a position after the current token,
        # i.e. we have called all deferreds before this index. If the loop
        # below doesn't overwrite it, then either a) there are no items in the
        # list, so this is a no-op, or b) all items in the list were called
        # and the list should be cleared. Setting it to `len(waiting_list)`
        # works for both cases.
        index_of_first_deferred_not_called = len(waiting_list)

        # We don't fire the deferreds until after we finish iterating over the
        # list, to avoid the list changing when we fire the deferreds.
        deferreds_to_callback = []

        for idx, (position, deferred) in enumerate(waiting_list):
            if position <= token:
                deferreds_to_callback.append(deferred)
            else:
                # The list is sorted by position so we don't need to continue
                # checking any further entries in the list.
                index_of_first_deferred_not_called = idx
                break

        # Drop all entries in the waiting list that were called in the above
        # loop. (This maintains the order so no need to resort)
        del waiting_list[:index_of_first_deferred_not_called]

        for deferred in deferreds_to_callback:
            try:
                with PreserveLoggingContext():
                    deferred.callback(None)
            except Exception:
                # The deferred has been cancelled or timed out.
                pass

    async def on_position(
        self, stream_name: str, instance_name: str, token: int
    ) -> None:
        await self.on_rdata(stream_name, instance_name, token, [])

        # We poke the generic "replication" notifier to wake anything up that
        # may be streaming.
        self.notifier.notify_replication()

    async def wait_for_stream_position(
        self,
        instance_name: str,
        stream_name: str,
        position: int,
    ) -> None:
        """Wait until this instance has received updates up to and including
        the given stream position.

        Args:
            instance_name
            stream_name
            position
        """

        if instance_name == self._instance_name:
            # We don't get told about updates written by this process, and
            # anyway in that case we don't need to wait.
            return

        current_position = self._streams[stream_name].current_token(instance_name)
        if position <= current_position:
            # We're already past the position
            return

        # Create a new deferred that times out after N seconds, as we don't want
        # to wedge here forever.
        deferred: "Deferred[None]" = Deferred()
        deferred = timeout_deferred(
            deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor
        )

        waiting_list = self._streams_to_waiters.setdefault(
            (stream_name, instance_name), SortedList(key=lambda t: t[0])
        )

        waiting_list.add((position, deferred))
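        # (The SortedList is keyed on position alone; comparing full
        # `(position, deferred)` tuples could fail when two positions tie,
        # since Deferred instances are not orderable.)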

        # We measure here to get in-flight counts and the average waiting time.
        with Measure(self._clock, "repl.wait_for_stream_position"):
            logger.info(
                "Waiting for repl stream %r to reach %s (%s); currently at: %s",
                stream_name,
                position,
                instance_name,
                current_position,
            )
            try:
                await make_deferred_yieldable(deferred)
            except defer.TimeoutError:
                logger.warning(
                    "Timed out waiting for repl stream %r to reach %s (%s)"
                    "; currently at: %s",
                    stream_name,
                    position,
                    instance_name,
                    self._streams[stream_name].current_token(instance_name),
                )
                return

            logger.info(
                "Finished waiting for repl stream %r to reach %s (%s)",
                stream_name,
                position,
                instance_name,
            )

    def stop_pusher(self, user_id: str, app_id: str, pushkey: str) -> None:
        if not self._notify_pushers:
            return

        key = "%s:%s" % (app_id, pushkey)
        pushers_for_user = self._pusher_pool.pushers.get(user_id, {})
        pusher = pushers_for_user.pop(key, None)
        if pusher is None:
            return
        logger.info("Stopping pusher %r / %r", user_id, key)
        pusher.on_stop()

    async def process_pusher_change(
        self, user_id: str, app_id: str, pushkey: str
    ) -> None:
        if not self._notify_pushers:
            return

        key = "%s:%s" % (app_id, pushkey)
        logger.info("Starting pusher %r / %r", user_id, key)
        await self._pusher_pool.process_pusher_change_by_id(app_id, pushkey, user_id)


class FederationSenderHandler:
    """Processes the fedration replication stream

    This class is only instantiate on the worker responsible for sending outbound
    federation transactions. It receives rows from the replication stream and forwards
    the appropriate entries to the FederationSender class.
    """

    def __init__(self, hs: "HomeServer"):
        assert hs.should_send_federation()

        self.store = hs.get_datastores().main
        self._is_mine_id = hs.is_mine_id
        self._hs = hs

        # We need to make a temporary value to ensure that mypy picks up the
        # right type. We know we should have a federation sender instance since
        # `should_send_federation` is True.
        sender = hs.get_federation_sender()
        assert isinstance(sender, FederationSender)
        self.federation_sender = sender

        # Stores the latest position in the federation stream we've gotten up
        # to. This is always set before we use it.
        self.federation_position: Optional[int] = None

        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
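        # The linearizer above serialises `_save_and_send_ack`, so that we
        # never have two concurrent attempts to persist and ack the position.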

    async def process_replication_rows(
        self, stream_name: str, token: int, rows: list
    ) -> None:
        # The federation stream contains things that we want to send out, e.g.
        # presence, typing, etc.
        if stream_name == "federation":
            await send_queue.process_rows_for_federation(self.federation_sender, rows)
            await self.update_token(token)

        # ... and when new receipts happen
        elif stream_name == ReceiptsStream.NAME:
            await self._on_new_receipts(rows)

        # ... as well as device updates and messages
        elif stream_name == DeviceListsStream.NAME:
            # The entities are either user IDs (starting with '@') whose devices
            # have changed, or remote servers that we need to tell about
            # changes.
            hosts = {
                row.entity
                for row in rows
                if not row.entity.startswith("@") and not row.is_signature
            }
            await self.federation_sender.send_device_messages(hosts, immediate=False)

        elif stream_name == ToDeviceStream.NAME:
            # The to_device stream includes stuff to be pushed to both local
            # clients and remote servers, so we ignore entities that start with
            # '@' (since they'll be local users rather than destinations).
            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
            await self.federation_sender.send_device_messages(hosts)

    async def _on_new_receipts(
        self, rows: Iterable[ReceiptsStream.ReceiptsStreamRow]
    ) -> None:
        """
        Args:
            rows: new receipts to be processed
        """
        for receipt in rows:
            # we only want to send on receipts for our own users
            if not self._is_mine_id(receipt.user_id):
                continue
            # Private read receipts never get sent over federation.
            if receipt.receipt_type == ReceiptTypes.READ_PRIVATE:
                continue
            receipt_info = ReadReceipt(
                receipt.room_id,
                receipt.receipt_type,
                receipt.user_id,
                [receipt.event_id],
                thread_id=receipt.thread_id,
                data=receipt.data,
            )
            await self.federation_sender.send_read_receipt(receipt_info)

    async def update_token(self, token: int) -> None:
        """Update the record of where we have processed to in the federation stream.

        Called after we have processed an update received over replication. Sends
        a FEDERATION_ACK back to the master, and stores the token that we have
        processed in `federation_stream_position` so that we can restart where we
        left off.
        """
        self.federation_position = token

        # We save and send the ACK to master asynchronously, so we don't block
        # processing on persistence. We don't need to do this for every single
        # RDATA we receive; it only needs to happen periodically.

        if self._fed_position_linearizer.is_queued(None):
            # There is already a task queued up to save and send the token, so
            # no need to queue up another task.
            return

        run_as_background_process("_save_and_send_ack", self._save_and_send_ack)
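        # (The background process above is deliberately not awaited, so
        # replication processing is not blocked on the database write.)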

    async def _save_and_send_ack(self) -> None:
        """Save the current federation position in the database and send an ACK
        to master with where we're up to.
        """
        # We should only be calling this once we've got a token.
        assert self.federation_position is not None

        try:
            # We linearize here to ensure we don't have races updating the token
            #
            # XXX this appears to be redundant, since the ReplicationCommandHandler
            # has a linearizer which ensures that we only process one line of
            # replication data at a time. Should we remove it, or is it doing useful
            # service for robustness? Or could we replace it with an assertion that
            # we're not being re-entered?

            async with self._fed_position_linearizer.queue(None):
                # We persist and ack the same position, so we take a copy of it
                # here as otherwise it can get modified from underneath us.
                current_position = self.federation_position

                await self.store.update_federation_out_pos(
                    "federation", current_position
                )

                # We ACK this token over replication so that the master can drop
                # its in-memory queues
                self._hs.get_replication_command_handler().send_federation_ack(
                    current_position
                )
        except Exception:
            logger.exception("Error updating federation stream position")