Diffstat (limited to 'synapse/handlers')
-rw-r--r--  synapse/handlers/admin.py           |  4
-rw-r--r--  synapse/handlers/appservice.py      | 52
-rw-r--r--  synapse/handlers/auth.py            |  6
-rw-r--r--  synapse/handlers/cas_handler.py     |  4
-rw-r--r--  synapse/handlers/federation.py      | 14
-rw-r--r--  synapse/handlers/groups_local.py    |  9
-rw-r--r--  synapse/handlers/message.py         |  2
-rw-r--r--  synapse/handlers/oidc_handler.py    | 10
-rw-r--r--  synapse/handlers/presence.py        |  4
-rw-r--r--  synapse/handlers/profile.py         |  9
-rw-r--r--  synapse/handlers/room.py            |  2
-rw-r--r--  synapse/handlers/saml_handler.py    |  4
-rw-r--r--  synapse/handlers/search.py          |  2
-rw-r--r--  synapse/handlers/state_deltas.py    |  2
-rw-r--r--  synapse/handlers/sync.py            |  4
-rw-r--r--  synapse/handlers/typing.py          |  2
-rw-r--r--  synapse/handlers/user_directory.py  |  2
17 files changed, 85 insertions, 47 deletions
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index 1ce2091b46..a703944543 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -88,7 +88,7 @@ class AdminHandler(BaseHandler):
 
         # We only try and fetch events for rooms the user has been in. If
         # they've been e.g. invited to a room without joining then we handle
-        # those seperately.
+        # those separately.
         rooms_user_has_been_in = await self.store.get_rooms_user_has_been_in(user_id)
 
         for index, room in enumerate(rooms):
@@ -226,7 +226,7 @@ class ExfiltrationWriter:
         """
 
     def finished(self):
-        """Called when all data has succesfully been exported and written.
+        """Called when all data has successfully been exported and written.
 
         This functions return value is passed to the caller of
         `export_user_data`.
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 64dea23fc5..3ed29a2c16 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import logging
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
 
 from prometheus_client import Counter
 
@@ -30,7 +30,10 @@ from synapse.metrics import (
     event_processing_loop_counter,
     event_processing_loop_room_count,
 )
-from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.metrics.background_process_metrics import (
+    run_as_background_process,
+    wrap_as_background_process,
+)
 from synapse.types import Collection, JsonDict, RoomStreamToken, UserID
 from synapse.util.metrics import Measure
 
@@ -53,7 +56,7 @@ class ApplicationServicesHandler:
         self.current_max = 0
         self.is_processing = False
 
-    async def notify_interested_services(self, max_token: RoomStreamToken):
+    def notify_interested_services(self, max_token: RoomStreamToken):
         """Notifies (pushes) all application services interested in this event.
 
         Pushing is done asynchronously, so this method won't block for any
@@ -72,6 +75,12 @@ class ApplicationServicesHandler:
         if self.is_processing:
             return
 
+        # We only start a new background process if necessary rather than
+        # optimistically (to cut down on overhead).
+        self._notify_interested_services(max_token)
+
+    @wrap_as_background_process("notify_interested_services")
+    async def _notify_interested_services(self, max_token: RoomStreamToken):
         with Measure(self.clock, "notify_interested_services"):
             self.is_processing = True
             try:
@@ -166,8 +175,11 @@ class ApplicationServicesHandler:
             finally:
                 self.is_processing = False
 
-    async def notify_interested_services_ephemeral(
-        self, stream_key: str, new_token: Optional[int], users: Collection[UserID] = [],
+    def notify_interested_services_ephemeral(
+        self,
+        stream_key: str,
+        new_token: Optional[int],
+        users: Collection[Union[str, UserID]] = [],
     ):
         """This is called by the notifier in the background
         when a ephemeral event handled by the homeserver.
@@ -183,13 +195,34 @@ class ApplicationServicesHandler:
             new_token: The latest stream token
             users: The user(s) involved with the event.
         """
+        if not self.notify_appservices:
+            return
+
+        if stream_key not in ("typing_key", "receipt_key", "presence_key"):
+            return
+
         services = [
             service
             for service in self.store.get_app_services()
             if service.supports_ephemeral
         ]
-        if not services or not self.notify_appservices:
+        if not services:
             return
+
+        # We only start a new background process if necessary rather than
+        # optimistically (to cut down on overhead).
+        self._notify_interested_services_ephemeral(
+            services, stream_key, new_token, users
+        )
+
+    @wrap_as_background_process("notify_interested_services_ephemeral")
+    async def _notify_interested_services_ephemeral(
+        self,
+        services: List[ApplicationService],
+        stream_key: str,
+        new_token: Optional[int],
+        users: Collection[Union[str, UserID]],
+    ):
         logger.info("Checking interested services for %s" % (stream_key))
         with Measure(self.clock, "notify_interested_services_ephemeral"):
             for service in services:
@@ -237,14 +270,17 @@ class ApplicationServicesHandler:
         return receipts
 
     async def _handle_presence(
-        self, service: ApplicationService, users: Collection[UserID]
-    ) -> List[JsonDict]:
+        self, service: ApplicationService, users: Collection[Union[str, UserID]]
+    ):
         events = []  # type: List[JsonDict]
         presence_source = self.event_sources.sources["presence"]
         from_key = await self.store.get_type_stream_id_for_appservice(
             service, "presence"
         )
         for user in users:
+            if isinstance(user, str):
+                user = UserID.from_string(user)
+
             interested = await service.is_interested_in_presence(user, self.store)
             if not interested:
                 continue
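
The appservice changes above turn the public notify_* entry points into plain synchronous methods: the cheap early-return checks (appservices disabled, unsupported stream key, already processing, no interested services) run inline, and only when there is real work to do does the handler kick off a private async method decorated with wrap_as_background_process. The snippet below is a minimal standalone sketch of that fire-and-forget shape; wrap_as_background_task is a simplified stand-in for Synapse's real decorator (which also handles metrics and logging contexts), not its actual implementation.

import asyncio
import functools


def wrap_as_background_task(name: str):
    """Simplified stand-in for wrap_as_background_process: the decorated
    coroutine is scheduled as a fire-and-forget task, so the method can be
    called from synchronous code and returns immediately."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Schedule the coroutine on the running loop; the caller never
            # awaits it, which is why the public entry point can stay sync.
            return asyncio.create_task(func(*args, **kwargs), name=name)

        return wrapper

    return decorator


class Handler:
    def __init__(self):
        self.is_processing = False

    def notify(self, token: int) -> None:
        # Cheap synchronous checks first, so we only pay the cost of a
        # background task when there is actually work to do.
        if self.is_processing:
            return
        self._notify(token)

    @wrap_as_background_task("notify")
    async def _notify(self, token: int) -> None:
        self.is_processing = True
        try:
            await asyncio.sleep(0)  # stand-in for the real processing work
            print("processed up to stream position", token)
        finally:
            self.is_processing = False


async def main():
    handler = Handler()
    handler.notify(42)        # returns immediately
    await asyncio.sleep(0.1)  # give the background task a chance to finish


asyncio.run(main())
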
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 8619fbb982..dd14ab69d7 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -470,9 +470,7 @@ class AuthHandler(BaseHandler):
             # authentication flow.
             await self.store.set_ui_auth_clientdict(sid, clientdict)
 
-        user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[
-            0
-        ].decode("ascii", "surrogateescape")
+        user_agent = request.get_user_agent("")
 
         await self.store.add_user_agent_ip_to_ui_auth_session(
             session.session_id, user_agent, clientip
@@ -692,7 +690,7 @@ class AuthHandler(BaseHandler):
         Creates a new access token for the user with the given user ID.
 
         The user is assumed to have been authenticated by some other
-        machanism (e.g. CAS), and the user_id converted to the canonical case.
+        mechanism (e.g. CAS), and the user_id converted to the canonical case.
 
         The device will be recorded in the table if it is not there already.
 
diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas_handler.py
index a4cc4b9a5a..048a3b3c0b 100644
--- a/synapse/handlers/cas_handler.py
+++ b/synapse/handlers/cas_handler.py
@@ -212,9 +212,7 @@ class CasHandler:
         else:
             if not registered_user_id:
                 # Pull out the user-agent and IP from the request.
-                user_agent = request.requestHeaders.getRawHeaders(
-                    b"User-Agent", default=[b""]
-                )[0].decode("ascii", "surrogateescape")
+                user_agent = request.get_user_agent("")
                 ip_address = self.hs.get_ip_from_request(request)
 
                 registered_user_id = await self._registration_handler.register_user(
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index fde8f00531..c386957706 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -112,7 +112,7 @@ class FederationHandler(BaseHandler):
     """Handles events that originated from federation.
         Responsible for:
         a) handling received Pdus before handing them on as Events to the rest
-        of the homeserver (including auth and state conflict resoultion)
+        of the homeserver (including auth and state conflict resolutions)
         b) converting events that were produced by local clients that may need
         to be sent to remote homeservers.
         c) doing the necessary dances to invite remote users and join remote
@@ -477,7 +477,7 @@ class FederationHandler(BaseHandler):
         # ----
         #
         # Update richvdh 2018/09/18: There are a number of problems with timing this
-        # request out agressively on the client side:
+        # request out aggressively on the client side:
         #
         # - it plays badly with the server-side rate-limiter, which starts tarpitting you
         #   if you send too many requests at once, so you end up with the server carefully
@@ -495,13 +495,13 @@ class FederationHandler(BaseHandler):
         #   we'll end up back here for the *next* PDU in the list, which exacerbates the
         #   problem.
         #
-        # - the agressive 10s timeout was introduced to deal with incoming federation
+        # - the aggressive 10s timeout was introduced to deal with incoming federation
         #   requests taking 8 hours to process. It's not entirely clear why that was going
         #   on; certainly there were other issues causing traffic storms which are now
         #   resolved, and I think in any case we may be more sensible about our locking
         #   now. We're *certainly* more sensible about our logging.
         #
-        # All that said: Let's try increasing the timout to 60s and see what happens.
+        # All that said: Let's try increasing the timeout to 60s and see what happens.
 
         try:
             missing_events = await self.federation_client.get_missing_events(
@@ -1120,7 +1120,7 @@ class FederationHandler(BaseHandler):
                     logger.info(str(e))
                     continue
                 except RequestSendFailed as e:
-                    logger.info("Falied to get backfill from %s because %s", dom, e)
+                    logger.info("Failed to get backfill from %s because %s", dom, e)
                     continue
                 except FederationDeniedError as e:
                     logger.info(e)
@@ -1545,7 +1545,7 @@ class FederationHandler(BaseHandler):
         #
         # The reasons we have the destination server rather than the origin
         # server send it are slightly mysterious: the origin server should have
-        # all the neccessary state once it gets the response to the send_join,
+        # all the necessary state once it gets the response to the send_join,
         # so it could send the event itself if it wanted to. It may be that
         # doing it this way reduces failure modes, or avoids certain attacks
         # where a new server selectively tells a subset of the federation that
@@ -1649,7 +1649,7 @@ class FederationHandler(BaseHandler):
         event.internal_metadata.outlier = True
         event.internal_metadata.out_of_band_membership = True
 
-        # Try the host that we succesfully called /make_leave/ on first for
+        # Try the host that we successfully called /make_leave/ on first for
         # the /send_leave/ request.
         host_list = list(target_hosts)
         try:
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index 9684e60fc8..abd8d2af44 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -17,7 +17,7 @@
 import logging
 
 from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
-from synapse.types import get_domain_from_id
+from synapse.types import GroupID, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
@@ -28,6 +28,9 @@ def _create_rerouter(func_name):
     """
 
     async def f(self, group_id, *args, **kwargs):
+        if not GroupID.is_valid(group_id):
+            raise SynapseError(400, "%s was not legal group ID" % (group_id,))
+
         if self.is_mine_id(group_id):
             return await getattr(self.groups_server_handler, func_name)(
                 group_id, *args, **kwargs
@@ -346,7 +349,7 @@ class GroupsLocalHandler(GroupsLocalWorkerHandler):
                 server_name=get_domain_from_id(group_id),
             )
 
-        # TODO: Check that the group is public and we're being added publically
+        # TODO: Check that the group is public and we're being added publicly
         is_publicised = content.get("publicise", False)
 
         token = await self.store.register_user_group_membership(
@@ -391,7 +394,7 @@ class GroupsLocalHandler(GroupsLocalWorkerHandler):
                 server_name=get_domain_from_id(group_id),
             )
 
-        # TODO: Check that the group is public and we're being added publically
+        # TODO: Check that the group is public and we're being added publicly
         is_publicised = content.get("publicise", False)
 
         token = await self.store.register_user_group_membership(
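
The groups_local change makes every generated rerouting function validate the group ID up front, returning a 400 for malformed IDs instead of trying to route the request by a domain extracted from garbage. For context, Matrix group IDs take the form +localpart:domain; the check below is a loose illustrative sketch of what such validation covers, not Synapse's stricter GroupID.is_valid from synapse.types.

def looks_like_group_id(group_id: str) -> bool:
    """Loose structural check: a '+' sigil, a localpart and a domain.
    (The real GroupID.is_valid is stricter about the allowed characters.)"""
    if not isinstance(group_id, str) or not group_id.startswith("+"):
        return False
    localpart, sep, domain = group_id[1:].partition(":")
    return bool(sep and localpart and domain)


assert looks_like_group_id("+friends:example.com")
assert not looks_like_group_id("friends:example.com")  # missing sigil
assert not looks_like_group_id("+friends")              # missing domain
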
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index d6855c60ea..f1b4d35182 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -657,7 +657,7 @@ class EventCreationHandler:
             context: The event context.
 
         Returns:
-            The previous verion of the event is returned, if it is found in the
+            The previous version of the event is returned, if it is found in the
             event context. Otherwise, None is returned.
         """
         prev_state_ids = await context.get_prev_state_ids()
diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py
index 05ac86e697..331d4e7e96 100644
--- a/synapse/handlers/oidc_handler.py
+++ b/synapse/handlers/oidc_handler.py
@@ -217,7 +217,7 @@ class OidcHandler:
 
         This is based on the requested scopes: if the scopes include
         ``openid``, the provider should give use an ID token containing the
-        user informations. If not, we should fetch them using the
+        user information. If not, we should fetch them using the
         ``access_token`` with the ``userinfo_endpoint``.
         """
 
@@ -426,7 +426,7 @@ class OidcHandler:
         return resp
 
     async def _fetch_userinfo(self, token: Token) -> UserInfo:
-        """Fetch user informations from the ``userinfo_endpoint``.
+        """Fetch user information from the ``userinfo_endpoint``.
 
         Args:
             token: the token given by the ``token_endpoint``.
@@ -695,9 +695,7 @@ class OidcHandler:
                 return
 
         # Pull out the user-agent and IP from the request.
-        user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[
-            0
-        ].decode("ascii", "surrogateescape")
+        user_agent = request.get_user_agent("")
         ip_address = self.hs.get_ip_from_request(request)
 
         # Call the mapper to register/login the user
@@ -756,7 +754,7 @@ class OidcHandler:
                 Defaults to an hour.
 
         Returns:
-            A signed macaroon token with the session informations.
+            A signed macaroon token with the session information.
         """
         macaroon = pymacaroons.Macaroon(
             location=self._server_name, identifier="key", key=self._macaroon_secret_key,
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 1000ac95ff..49a00eed9c 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -802,7 +802,7 @@ class PresenceHandler(BasePresenceHandler):
             between the requested tokens due to the limit.
 
             The token returned can be used in a subsequent call to this
-            function to get further updatees.
+            function to get further updates.
 
             The updates are a list of 2-tuples of stream ID and the row data
         """
@@ -977,7 +977,7 @@ def should_notify(old_state, new_state):
             new_state.last_active_ts - old_state.last_active_ts
             > LAST_ACTIVE_GRANULARITY
         ):
-            # Only notify about last active bumps if we're not currently acive
+            # Only notify about last active bumps if we're not currently active
             if not new_state.currently_active:
                 notify_reason_counter.labels("last_active_change_online").inc()
                 return True
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 92700b589c..3875e53c08 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -98,11 +98,18 @@ class ProfileHandler(BaseHandler):
             except RequestSendFailed as e:
                 raise SynapseError(502, "Failed to fetch profile") from e
             except HttpResponseException as e:
+                if e.code < 500 and e.code != 404:
+                    # Other codes are not allowed in c2s API
+                    logger.info(
+                        "Server replied with wrong response: %s %s", e.code, e.msg
+                    )
+
+                    raise SynapseError(502, "Failed to fetch profile")
                 raise e.to_synapse_error()
 
     async def get_profile_from_cache(self, user_id: str) -> JsonDict:
         """Get the profile information from our local cache. If the user is
-        ours then the profile information will always be corect. Otherwise,
+        ours then the profile information will always be correct. Otherwise,
         it may be out of date/missing.
         """
         target_user = UserID.from_string(user_id)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index ec300d8877..c5b1f1f1e1 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1268,7 +1268,7 @@ class RoomShutdownHandler:
             )
 
             # We now wait for the create room to come back in via replication so
-            # that we can assume that all the joins/invites have propogated before
+            # that we can assume that all the joins/invites have propagated before
             # we try and auto join below.
             await self._replication.wait_for_stream_position(
                 self.hs.config.worker.events_shard_config.get_instance(new_room_id),
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index 285c481a96..fd6c5e9ea8 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -216,9 +216,7 @@ class SamlHandler:
                 return
 
         # Pull out the user-agent and IP from the request.
-        user_agent = request.requestHeaders.getRawHeaders(b"User-Agent", default=[b""])[
-            0
-        ].decode("ascii", "surrogateescape")
+        user_agent = request.get_user_agent("")
         ip_address = self.hs.get_ip_from_request(request)
 
         # Call the mapper to register/login the user
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py
index e9402e6e2e..66f1bbcfc4 100644
--- a/synapse/handlers/search.py
+++ b/synapse/handlers/search.py
@@ -139,7 +139,7 @@ class SearchHandler(BaseHandler):
             # Filter to apply to results
             filter_dict = room_cat.get("filter", {})
 
-            # What to order results by (impacts whether pagination can be doen)
+            # What to order results by (impacts whether pagination can be done)
             order_by = room_cat.get("order_by", "rank")
 
             # Return the current state of the rooms?
diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py
index 7a4ae0727a..fb4f70e8e2 100644
--- a/synapse/handlers/state_deltas.py
+++ b/synapse/handlers/state_deltas.py
@@ -32,7 +32,7 @@ class StateDeltasHandler:
         Returns:
             None if the field in the events either both match `public_value`
             or if neither do, i.e. there has been no change.
-            True if it didnt match `public_value` but now does
+            True if it didn't match `public_value` but now does
             False if it did match `public_value` but now doesn't
         """
         prev_event = None
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index b527724bc4..32e53c2d25 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -754,7 +754,7 @@ class SyncHandler:
         """
         # TODO(mjark) Check if the state events were received by the server
         # after the previous sync, since we need to include those state
-        # updates even if they occured logically before the previous event.
+        # updates even if they occurred logically before the previous event.
         # TODO(mjark) Check for new redactions in the state events.
 
         with Measure(self.clock, "compute_state_delta"):
@@ -1882,7 +1882,7 @@ class SyncHandler:
         # members (as the client otherwise doesn't have enough info to form
         # the name itself).
         if sync_config.filter_collection.lazy_load_members() and (
-            # we recalulate the summary:
+            # we recalculate the summary:
             #   if there are membership changes in the timeline, or
             #   if membership has changed during a gappy sync, or
             #   if this is an initial sync.
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index d3692842e3..8758066c74 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -371,7 +371,7 @@ class TypingWriterHandler(FollowerTypingHandler):
             between the requested tokens due to the limit.
 
             The token returned can be used in a subsequent call to this
-            function to get further updatees.
+            function to get further updates.
 
             The updates are a list of 2-tuples of stream ID and the row data
         """
diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py
index 79393c8829..afbebfc200 100644
--- a/synapse/handlers/user_directory.py
+++ b/synapse/handlers/user_directory.py
@@ -31,7 +31,7 @@ class UserDirectoryHandler(StateDeltasHandler):
     N.B.: ASSUMES IT IS THE ONLY THING THAT MODIFIES THE USER DIRECTORY
 
     The user directory is filled with users who this server can see are joined to a
-    world_readable or publically joinable room. We keep a database table up to date
+    world_readable or publicly joinable room. We keep a database table up to date
     by streaming changes of the current state and recalculating whether users should
     be in the directory or not when necessary.
     """