Diffstat (limited to 'synapse')
-rw-r--r--  synapse/config/auth.py                                     |  2 +-
-rw-r--r--  synapse/config/room_directory.py                           |  2 +-
-rw-r--r--  synapse/handlers/room.py                                   |  2 +-
-rw-r--r--  synapse/push/bulk_push_rule_evaluator.py                   |  2 +-
-rw-r--r--  synapse/rest/admin/users.py                                |  2 +-
-rw-r--r--  synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py |  2 +-
-rw-r--r--  synapse/rest/media/v1/_base.py                             |  2 +-
-rw-r--r--  synapse/rest/media/v1/media_repository.py                 |  8 ++++----
-rw-r--r--  synapse/state/__init__.py                                  |  2 +-
-rw-r--r--  synapse/storage/databases/main/deviceinbox.py              |  2 +-
-rw-r--r--  synapse/storage/databases/main/event_federation.py        |  4 ++--
-rw-r--r--  synapse/storage/databases/main/events.py                   |  2 +-
-rw-r--r--  synapse/storage/databases/main/keys.py                     |  4 ++--
-rw-r--r--  synapse/storage/databases/main/metrics.py                  |  4 ++--
-rw-r--r--  synapse/storage/databases/main/receipts.py                 |  6 +++---
-rw-r--r--  synapse/storage/databases/main/room.py                     |  2 +-
-rw-r--r--  synapse/storage/databases/main/state_deltas.py             |  4 ++--
-rw-r--r--  synapse/storage/databases/main/transactions.py             |  2 +-
-rw-r--r--  synapse/storage/databases/state/bg_updates.py              |  2 +-
-rw-r--r--  synapse/storage/prepare_database.py                        |  2 +-
-rw-r--r--  synapse/storage/util/id_generators.py                      |  6 +++---
-rw-r--r--  synapse/types.py                                           |  2 +-
-rw-r--r--  synapse/util/async_helpers.py                              |  2 +-
23 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/synapse/config/auth.py b/synapse/config/auth.py
index 2b3e2ce87b..1f4c090cde 100644
--- a/synapse/config/auth.py
+++ b/synapse/config/auth.py
@@ -98,7 +98,7 @@ class AuthConfig(Config):
             # session to be active.
             #
             # This defaults to 0, meaning the user is queried for their credentials
-            # before every action, but this can be overridden to alow a single
+            # before every action, but this can be overridden to allow a single
             # validation to be re-used.  This weakens the protections afforded by
             # the user-interactive authentication process, by allowing for multiple
             # (and potentially different) operations to use the same validation session.
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
index 9a3e1c3e7d..2dd719c388 100644
--- a/synapse/config/room_directory.py
+++ b/synapse/config/room_directory.py
@@ -123,7 +123,7 @@ class RoomDirectoryConfig(Config):
             alias (str)
 
         Returns:
-            boolean: True if user is allowed to crate the alias
+            boolean: True if user is allowed to create the alias
         """
         for rule in self._alias_creation_rules:
             if rule.matches(user_id, room_id, [alias]):
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 1336a23a3a..3cf6635821 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -829,7 +829,7 @@ class RoomCreationHandler(BaseHandler):
         if room_alias:
             result["room_alias"] = room_alias.to_string()
 
-        # Always wait for room creation to progate before returning
+        # Always wait for room creation to propagate before returning
         await self._replication.wait_for_stream_position(
             self.hs.config.worker.events_shard_config.get_instance(room_id),
             "events",
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 9018f9e20b..6317f22d3c 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -524,7 +524,7 @@ class RulesForRoom:
 class _Invalidation:
     # _Invalidation is passed as an `on_invalidate` callback to bulk_get_push_rules,
     # which means that it it is stored on the bulk_get_push_rules cache entry. In order
-    # to ensure that we don't accumulate lots of redunant callbacks on the cache entry,
+    # to ensure that we don't accumulate lots of redundant callbacks on the cache entry,
     # we need to ensure that two _Invalidation objects are "equal" if they refer to the
     # same `cache` and `room_id`.
     #
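
The comment above relies on value-based equality so that registering the callback twice for the same cache and room does not pile up duplicate entries on the cache. A minimal sketch of that idea, using a frozen dataclass and a stand-in cache rather than Synapse's actual `_Invalidation` class:

    from dataclasses import dataclass
    from typing import Any

    class FakeCache:
        """Stand-in cache used only for this sketch."""
        def __init__(self):
            self.invalidated = []

        def invalidate(self, key):
            self.invalidated.append(key)

    @dataclass(frozen=True)
    class Invalidation:
        # frozen=True generates value-based __eq__/__hash__, so two instances
        # for the same (cache, room_id) deduplicate instead of accumulating.
        cache: Any
        room_id: str

        def __call__(self) -> None:
            self.cache.invalidate((self.room_id,))

    cache = FakeCache()
    callbacks = {Invalidation(cache, "!room:example.org"),
                 Invalidation(cache, "!room:example.org")}
    assert len(callbacks) == 1  # the duplicate collapsed away
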
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 68c3c64a0d..9350c704b9 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -752,7 +752,7 @@ class PushersRestServlet(RestServlet):
 
     Returns:
         pushers: Dictionary containing pushers information.
-        total: Number of pushers in dictonary `pushers`.
+        total: Number of pushers in dictionary `pushers`.
     """
 
     PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index bf030e0ff4..147920767f 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
 
 
 class RoomUpgradeRestServlet(RestServlet):
-    """Handler for room uprade requests.
+    """Handler for room upgrade requests.
 
     Handles requests of the form:
 
diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py
index f71a03a12d..90bbeca679 100644
--- a/synapse/rest/media/v1/_base.py
+++ b/synapse/rest/media/v1/_base.py
@@ -137,7 +137,7 @@ def add_file_headers(
         # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token`
         # is (essentially) a single US-ASCII word, and a `quoted-string` is a
         # US-ASCII string surrounded by double-quotes, using backslash as an
-        # escape charater. Note that %-encoding is *not* permitted.
+        # escape character. Note that %-encoding is *not* permitted.
         #
         # `filename*` is defined to be an `ext-value`, which is defined in
         # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`,
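
The comment above distinguishes the plain `filename` parameter (a US-ASCII token or backslash-escaped quoted-string, with no %-encoding) from the RFC 5987 `filename*` ext-value form. A simplified sketch of that choice, not Synapse's actual `add_file_headers` logic:

    import re
    from urllib.parse import quote

    def content_disposition(filename: str) -> str:
        if re.fullmatch(r"[ -~]+", filename):
            # Printable US-ASCII: send a quoted-string, escaping backslashes
            # and double-quotes; %-encoding is not permitted here.
            escaped = filename.replace("\\", "\\\\").replace('"', '\\"')
            return 'inline; filename="%s"' % escaped
        # Otherwise fall back to the RFC 5987 ext-value form:
        # charset "'" [ language ] "'" percent-encoded value.
        return "inline; filename*=utf-8''" + quote(filename, safe="")

    print(content_disposition("report.pdf"))
    print(content_disposition("отчёт.pdf"))
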
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 4c9946a616..635bccf775 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -184,7 +184,7 @@ class MediaRepository:
     async def get_local_media(
         self, request: Request, media_id: str, name: Optional[str]
     ) -> None:
-        """Responds to reqests for local media, if exists, or returns 404.
+        """Responds to requests for local media, if exists, or returns 404.
 
         Args:
             request: The incoming request.
@@ -306,7 +306,7 @@ class MediaRepository:
         media_info = await self.store.get_cached_remote_media(server_name, media_id)
 
         # file_id is the ID we use to track the file locally. If we've already
-        # seen the file then reuse the existing ID, otherwise genereate a new
+        # seen the file then reuse the existing ID, otherwise generate a new
         # one.
 
         # If we have an entry in the DB, try and look for it
@@ -927,10 +927,10 @@ class MediaRepositoryResource(Resource):
 
            <thumbnail>
 
-    The thumbnail methods are "crop" and "scale". "scale" trys to return an
+    The thumbnail methods are "crop" and "scale". "scale" tries to return an
     image where either the width or the height is smaller than the requested
     size. The client should then scale and letterbox the image if it needs to
-    fit within a given rectangle. "crop" trys to return an image where the
+    fit within a given rectangle. "crop" tries to return an image where the
     width and height are close to the requested size and the aspect matches
     the requested size. The client should scale the image if it needs to fit
     within a given rectangle.
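
The paragraph above describes the contract for the two thumbnail methods; the dimension arithmetic it implies can be sketched as follows (illustrative only, not the MediaRepository/Thumbnailer implementation):

    def scale_dimensions(width, height, max_w, max_h):
        # "scale": fit entirely within the requested box, keeping the aspect
        # ratio, so one side may come out smaller than requested.
        ratio = min(max_w / width, max_h / height)
        return max(1, int(width * ratio)), max(1, int(height * ratio))

    def crop_dimensions(width, height, max_w, max_h):
        # "crop": cover the requested box, then trim the overhanging axis so
        # the result matches the requested size and aspect ratio.
        ratio = max(max_w / width, max_h / height)
        return min(int(width * ratio), max_w), min(int(height * ratio), max_h)

    print(scale_dimensions(1600, 900, 320, 240))  # (320, 180) - fits inside the box
    print(crop_dimensions(1600, 900, 320, 240))   # (320, 240) - matches the requested size
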
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 3bd9ff8ca0..28544ccb92 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -615,7 +615,7 @@ class StateResolutionHandler:
             event_map:
                 a dict from event_id to event, for any events that we happen to
                 have in flight (eg, those currently being persisted). This will be
-                used as a starting point fof finding the state we need; any missing
+                used as a starting point for finding the state we need; any missing
                 events will be requested via state_map_factory.
 
                 If None, all events will be fetched via state_res_store.
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 31f70ac5ef..45ca6620a8 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -450,7 +450,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 },
             )
 
-            # Add the messages to the approriate local device inboxes so that
+            # Add the messages to the appropriate local device inboxes so that
             # they'll be sent to the devices when they next sync.
             self._add_messages_to_local_device_inbox_txn(
                 txn, stream_id, local_messages_by_user_then_device
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 8326640d20..ddfb13e3ad 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -371,7 +371,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         # and state sets {A} and {B} then walking the auth chains of A and B
         # would immediately show that C is reachable by both. However, if we
         # stopped at C then we'd only reach E via the auth chain of B and so E
-        # would errornously get included in the returned difference.
+        # would erroneously get included in the returned difference.
         #
         # The other thing that we do is limit the number of auth chains we walk
         # at once, due to practical limits (i.e. we can only query the database
@@ -497,7 +497,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
                         a_ids = new_aids
 
-                # Mark that the auth event is reachable by the approriate sets.
+                # Mark that the auth event is reachable by the appropriate sets.
                 sets.intersection_update(event_to_missing_sets[event_id])
 
             search.sort()
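
The first hunk in this file explains why the walk must continue past events already reachable from a single state set. As a point of reference, the brute-force definition being optimised (the union of the full auth chains minus their intersection) can be written directly; a naive sketch, not the store's SQL-backed implementation:

    def auth_chain_difference(state_sets, auth_graph):
        # auth_graph maps event_id -> iterable of its auth event_ids.
        def full_auth_chain(events):
            seen, stack = set(), list(events)
            while stack:
                event_id = stack.pop()
                for auth_id in auth_graph.get(event_id, ()):
                    if auth_id not in seen:
                        seen.add(auth_id)
                        stack.append(auth_id)
            return seen

        chains = [full_auth_chain(state_set) for state_set in state_sets]
        # Events reachable from at least one state set but not from all of them.
        return set.union(*chains) - set.intersection(*chains)
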
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index ccda9f1caa..7abfb9112e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1050,7 +1050,7 @@ class PersistEventsStore:
             # Figure out the changes of membership to invalidate the
             # `get_rooms_for_user` cache.
             # We find out which membership events we may have deleted
-            # and which we have added, then we invlidate the caches for all
+            # and which we have added, then we invalidate the caches for all
             # those users.
             members_changed = {
                 state_key
diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py
index 04ac2d0ced..e97026dc2e 100644
--- a/synapse/storage/databases/main/keys.py
+++ b/synapse/storage/databases/main/keys.py
@@ -155,7 +155,7 @@ class KeyStore(SQLBaseStore):
         (server_name, key_id, from_server) triplet if one already existed.
         Args:
             server_name: The name of the server.
-            key_id: The identifer of the key this JSON is for.
+            key_id: The identifier of the key this JSON is for.
             from_server: The server this JSON was fetched from.
             ts_now_ms: The time now in milliseconds.
             ts_valid_until_ms: The time when this json stops being valid.
@@ -182,7 +182,7 @@ class KeyStore(SQLBaseStore):
     async def get_server_keys_json(
         self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]]
     ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[dict]]:
-        """Retrive the key json for a list of server_keys and key ids.
+        """Retrieve the key json for a list of server_keys and key ids.
         If no keys are found for a given server, key_id and source then
         that server, key_id, and source triplet entry will be an empty list.
         The JSON is returned as a byte array so that it can be efficiently
diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py
index 92e65aa640..614a418a15 100644
--- a/synapse/storage/databases/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -111,7 +111,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
     async def count_daily_sent_e2ee_messages(self):
         def _count_messages(txn):
             # This is good enough as if you have silly characters in your own
-            # hostname then thats your own fault.
+            # hostname then that's your own fault.
             like_clause = "%:" + self.hs.hostname
 
             sql = """
@@ -167,7 +167,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
     async def count_daily_sent_messages(self):
         def _count_messages(txn):
             # This is good enough as if you have silly characters in your own
-            # hostname then thats your own fault.
+            # hostname then that's your own fault.
             like_clause = "%:" + self.hs.hostname
 
             sql = """
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index e4843a202c..ae9283f52d 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -160,7 +160,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
         Args:
             room_id: List of room_ids.
-            to_key: Max stream id to fetch receipts upto.
+            to_key: Max stream id to fetch receipts up to.
             from_key: Min stream id to fetch receipts from. None fetches
                 from the start.
 
@@ -189,7 +189,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
         Args:
             room_ids: The room id.
-            to_key: Max stream id to fetch receipts upto.
+            to_key: Max stream id to fetch receipts up to.
             from_key: Min stream id to fetch receipts from. None fetches
                 from the start.
 
@@ -312,7 +312,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
         to a limit of the latest 100 read receipts.
 
         Args:
-            to_key: Max stream id to fetch receipts upto.
+            to_key: Max stream id to fetch receipts up to.
             from_key: Min stream id to fetch receipts from. None fetches
                 from the start.
 
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index a9fcb5f59c..cba343aa68 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1044,7 +1044,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
     async def _background_add_rooms_room_version_column(
         self, progress: dict, batch_size: int
     ):
-        """Background update to go and add room version inforamtion to `rooms`
+        """Background update to go and add room version information to `rooms`
         table from `current_state_events` table.
         """
 
diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py
index 356623fc6e..0dbb501f16 100644
--- a/synapse/storage/databases/main/state_deltas.py
+++ b/synapse/storage/databases/main/state_deltas.py
@@ -64,7 +64,7 @@ class StateDeltasStore(SQLBaseStore):
         def get_current_state_deltas_txn(txn):
             # First we calculate the max stream id that will give us less than
             # N results.
-            # We arbitarily limit to 100 stream_id entries to ensure we don't
+            # We arbitrarily limit to 100 stream_id entries to ensure we don't
             # select toooo many.
             sql = """
                 SELECT stream_id, count(*)
@@ -81,7 +81,7 @@ class StateDeltasStore(SQLBaseStore):
             for stream_id, count in txn:
                 total += count
                 if total > 100:
-                    # We arbitarily limit to 100 entries to ensure we don't
+                    # We arbitrarily limit to 100 entries to ensure we don't
                     # select toooo many.
                     logger.debug(
                         "Clipping current_state_delta_stream rows to stream_id %i",
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index cea595ff19..248a6c3f25 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -198,7 +198,7 @@ class TransactionStore(TransactionWorkerStore):
         retry_interval: int,
     ) -> None:
         """Sets the current retry timings for a given destination.
-        Both timings should be zero if retrying is no longer occuring.
+        Both timings should be zero if retrying is no longer occurring.
 
         Args:
             destination
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index acb24e33af..1fd333b707 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -27,7 +27,7 @@ MAX_STATE_DELTA_HOPS = 100
 
 
 class StateGroupBackgroundUpdateStore(SQLBaseStore):
-    """Defines functions related to state groups needed to run the state backgroud
+    """Defines functions related to state groups needed to run the state background
     updates.
     """
 
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 28bb2eb662..cd30e6b80a 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -113,7 +113,7 @@ def prepare_database(
             # which should be empty.
             if config is None:
                 raise ValueError(
-                    "config==None in prepare_database, but databse is not empty"
+                    "config==None in prepare_database, but database is not empty"
                 )
 
             # if it's a worker app, refuse to upgrade the database, to avoid multiple
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 71ef5a72dc..9dd537bf66 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -245,7 +245,7 @@ class MultiWriterIdGenerator:
         # and b) noting that if we have seen a run of persisted positions
         # without gaps (e.g. 5, 6, 7) then we can skip forward (e.g. to 7).
         #
-        # Note: There is no guarentee that the IDs generated by the sequence
+        # Note: There is no guarantee that the IDs generated by the sequence
         # will be gapless; gaps can form when e.g. a transaction was rolled
         # back. This means that sometimes we won't be able to skip forward the
         # position even though everything has been persisted. However, since
@@ -418,7 +418,7 @@ class MultiWriterIdGenerator:
         # bother, as nothing will read it).
         #
         # We only do this on the success path so that the persisted current
-        # position points to a persited row with the correct instance name.
+        # position points to a persisted row with the correct instance name.
         if self._writers:
             txn.call_after(
                 run_as_background_process,
@@ -509,7 +509,7 @@ class MultiWriterIdGenerator:
             }
 
     def advance(self, instance_name: str, new_id: int):
-        """Advance the postion of the named writer to the given ID, if greater
+        """Advance the position of the named writer to the given ID, if greater
         than existing entry.
         """
 
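
The first hunk in this file documents the "skip forward over a gapless run" bookkeeping. A self-contained sketch of just that bookkeeping (illustrative, not MultiWriterIdGenerator itself):

    import heapq

    class PositionTracker:
        """Track persisted stream positions and advance the current position
        only across a contiguous run, since the sequence may contain gaps."""

        def __init__(self, current: int = 0):
            self._current = current
            self._persisted = []  # min-heap of persisted positions ahead of current

        def mark_persisted(self, position: int) -> None:
            heapq.heappush(self._persisted, position)
            # Skip forward while the next persisted ID follows on directly
            # (e.g. 5, 6, 7 -> 7); stop at the first gap, which may never fill.
            while self._persisted and self._persisted[0] <= self._current + 1:
                self._current = max(self._current, heapq.heappop(self._persisted))

        @property
        def current(self) -> int:
            return self._current

    tracker = PositionTracker(current=4)
    for position in (7, 5, 6):
        tracker.mark_persisted(position)
    print(tracker.current)  # 7 - positions 5, 6, 7 form a gapless run after 4
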
diff --git a/synapse/types.py b/synapse/types.py
index eafe729dfe..c695558a86 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -675,7 +675,7 @@ class PersistedEventPosition:
         persisted in the same room after this position will be after the
         returned `RoomStreamToken`.
 
-        Note: no guarentees are made about ordering w.r.t. events in other
+        Note: no guarantees are made about ordering w.r.t. events in other
         rooms.
         """
         # Doing the naive thing satisfies the desired properties described in
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py
index 9a873c8e8e..691dde9a01 100644
--- a/synapse/util/async_helpers.py
+++ b/synapse/util/async_helpers.py
@@ -497,7 +497,7 @@ def timeout_deferred(
     delayed_call = reactor.callLater(timeout, time_it_out)
 
     def convert_cancelled(value: failure.Failure):
-        # if the orgininal deferred was cancelled, and our timeout has fired, then
+        # if the original deferred was cancelled, and our timeout has fired, then
         # the reason it was cancelled was due to our timeout. Turn the CancelledError
         # into a TimeoutError.
         if timed_out[0] and value.check(CancelledError):
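
For context on the comment fixed above, the flag-plus-errback pattern it describes can be sketched end to end (a simplified illustration, not Synapse's full `timeout_deferred` helper):

    from twisted.internet import defer, reactor
    from twisted.python import failure

    def timeout_deferred_sketch(deferred: defer.Deferred, timeout: float) -> defer.Deferred:
        timed_out = [False]

        def time_it_out():
            # Record that the cancellation about to happen is ours.
            timed_out[0] = True
            deferred.cancel()

        delayed_call = reactor.callLater(timeout, time_it_out)

        def convert_cancelled(value: failure.Failure):
            # If our timer fired, the CancelledError really means "timed out".
            if timed_out[0] and value.check(defer.CancelledError):
                raise defer.TimeoutError("Deferred timed out after %ss" % timeout)
            return value

        deferred.addErrback(convert_cancelled)

        def cancel_timeout(result):
            # The deferred completed on its own; stop the pending timer.
            if delayed_call.active():
                delayed_call.cancel()
            return result

        deferred.addBoth(cancel_timeout)
        return deferred
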