-rw-r--r--  synapse/federation/transport/client.py            2
-rw-r--r--  synapse/handlers/room_member.py                   14
-rw-r--r--  synapse/push/httppusher.py                         5
-rw-r--r--  synapse/storage/databases/main/client_ips.py       2
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py      4
-rw-r--r--  synapse/storage/databases/main/events_worker.py    4

6 files changed, 29 insertions, 2 deletions
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index c05d598b70..9204084c67 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -971,7 +971,7 @@ class _StateParser(ByteParser[StateRequestResponse]):
     CONTENT_TYPE = "application/json"
 
     # As with /send_join, /state responses can be huge.
-    MAX_RESPONSE_SIZE = 500 * 1024 * 1024
+    MAX_RESPONSE_SIZE = 600 * 1024 * 1024
 
     def __init__(self, room_version: RoomVersion):
         self._response = StateRequestResponse([], [])
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 1d8b0aee6f..2443c33c33 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -609,10 +609,24 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
         if requester.app_service:
             as_id = requester.app_service.id
 
+        then = self.clock.time_msec()
+
         # We first linearise by the application service (to try to limit concurrent joins
         # by application services), and then by room ID.
         async with self.member_as_limiter.queue(as_id):
+            diff = self.clock.time_msec() - then
+
+            if diff > 80 * 1000:
+                # haproxy would have timed the request out anyway...
+                raise SynapseError(504, "took too long to process")
+
             async with self.member_linearizer.queue(key):
+                diff = self.clock.time_msec() - then
+
+                if diff > 80 * 1000:
+                    # haproxy would have timed the request out anyway...
+                    raise SynapseError(504, "took too long to process")
+
                 with opentracing.start_active_span("update_membership_locked"):
                     result = await self.update_membership_locked(
                         requester,
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index b048b03a74..3330c46e0e 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -137,6 +137,11 @@ class HttpPusher(Pusher):
                 "'url' must have a path of '/_matrix/push/v1/notify'"
             )
 
+        url = url.replace(
+            "https://matrix.org/_matrix/push/v1/notify",
+            "http://10.103.0.7/_matrix/push/v1/notify",
+        )
+
         self.url = url
         self.http_client = hs.get_proxied_blacklisted_http_client()
         self.data_minus_url = {}
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 0df160d2b0..1f6558c3df 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -39,7 +39,7 @@ logger = logging.getLogger(__name__)
 # Number of msec of granularity to store the user IP 'last seen' time. Smaller
 # times give more inserts into the database even for readonly API hits
 # 120 seconds == 2 minutes
-LAST_SEEN_GRANULARITY = 120 * 1000
+LAST_SEEN_GRANULARITY = 10 * 60 * 1000
 
 
 class DeviceLastConnectionInfo(TypedDict):
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index b471fcb064..25f70fee84 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -820,6 +820,10 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                     retcol="device_id",
                 )
 
+                if len(devices) > 1000:
+                    logger.warning("ignoring wildcard to-device messages to %i devices", len(devices))
+                    continue
+
                 message_json = json_encoder.encode(messages_by_device["*"])
                 for device_id in devices:
                     # Add the message for all devices for this user on this
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 0cf46626d2..6b4769f7b6 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -2294,6 +2294,10 @@ class EventsWorkerStore(SQLBaseStore):
         """
 
         def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]:
+            if isinstance(self.database_engine, PostgresEngine):
+                # Temporary: make sure these queries can't last more than 30s
+                txn.execute("SET LOCAL statement_timeout = 30000")
+
             txn.execute(
                 sql_template,
                 (room_id, timestamp),