| author | Patrick Cloke <patrickc@matrix.org> | 2021-02-12 11:01:48 -0500 |
|---|---|---|
| committer | Patrick Cloke <patrickc@matrix.org> | 2021-02-12 11:14:12 -0500 |
| commit | 7950aa8a27c3f45184c96fda210c62d068dd2591 (patch) | |
| tree | bdbb8b61fa05020fdf800818066a8dc1a230da8b /synapse/storage | |
| parent | Merge tag 'v1.27.0rc2' into develop (diff) | |
| download | synapse-7950aa8a27c3f45184c96fda210c62d068dd2591.tar.xz | |
Fix some typos.
Diffstat (limited to 'synapse/storage')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | synapse/storage/databases/main/deviceinbox.py | 2 |
| -rw-r--r-- | synapse/storage/databases/main/event_federation.py | 4 |
| -rw-r--r-- | synapse/storage/databases/main/events.py | 2 |
| -rw-r--r-- | synapse/storage/databases/main/keys.py | 4 |
| -rw-r--r-- | synapse/storage/databases/main/metrics.py | 4 |
| -rw-r--r-- | synapse/storage/databases/main/receipts.py | 6 |
| -rw-r--r-- | synapse/storage/databases/main/room.py | 2 |
| -rw-r--r-- | synapse/storage/databases/main/state_deltas.py | 4 |
| -rw-r--r-- | synapse/storage/databases/main/transactions.py | 2 |
| -rw-r--r-- | synapse/storage/databases/state/bg_updates.py | 2 |
| -rw-r--r-- | synapse/storage/prepare_database.py | 2 |
| -rw-r--r-- | synapse/storage/util/id_generators.py | 6 |
12 files changed, 20 insertions, 20 deletions
```diff
diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index 31f70ac5ef..45ca6620a8 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -450,7 +450,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
                 },
             )
 
-            # Add the messages to the approriate local device inboxes so that
+            # Add the messages to the appropriate local device inboxes so that
             # they'll be sent to the devices when they next sync.
             self._add_messages_to_local_device_inbox_txn(
                 txn, stream_id, local_messages_by_user_then_device
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index 8326640d20..ddfb13e3ad 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -371,7 +371,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         # and state sets {A} and {B} then walking the auth chains of A and B
         # would immediately show that C is reachable by both. However, if we
         # stopped at C then we'd only reach E via the auth chain of B and so E
-        # would errornously get included in the returned difference.
+        # would erroneously get included in the returned difference.
         #
         # The other thing that we do is limit the number of auth chains we walk
         # at once, due to practical limits (i.e. we can only query the database
@@ -497,7 +497,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
 
             a_ids = new_aids
 
-            # Mark that the auth event is reachable by the approriate sets.
+            # Mark that the auth event is reachable by the appropriate sets.
             sets.intersection_update(event_to_missing_sets[event_id])
 
             search.sort()
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index ccda9f1caa..7abfb9112e 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -1050,7 +1050,7 @@ class PersistEventsStore:
             # Figure out the changes of membership to invalidate the
             # `get_rooms_for_user` cache.
             # We find out which membership events we may have deleted
-            # and which we have added, then we invlidate the caches for all
+            # and which we have added, then we invalidate the caches for all
             # those users.
             members_changed = {
                 state_key
diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py
index 04ac2d0ced..e97026dc2e 100644
--- a/synapse/storage/databases/main/keys.py
+++ b/synapse/storage/databases/main/keys.py
@@ -155,7 +155,7 @@ class KeyStore(SQLBaseStore):
         (server_name, key_id, from_server) triplet if one already existed.
         Args:
             server_name: The name of the server.
-            key_id: The identifer of the key this JSON is for.
+            key_id: The identifier of the key this JSON is for.
             from_server: The server this JSON was fetched from.
             ts_now_ms: The time now in milliseconds.
             ts_valid_until_ms: The time when this json stops being valid.
@@ -182,7 +182,7 @@ class KeyStore(SQLBaseStore):
     async def get_server_keys_json(
         self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]]
     ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[dict]]:
-        """Retrive the key json for a list of server_keys and key ids.
+        """Retrieve the key json for a list of server_keys and key ids.
         If no keys are found for a given server, key_id and source then
         that server, key_id, and source triplet entry will be an empty list.
         The JSON is returned as a byte array so that it can be efficiently
diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py
index 92e65aa640..614a418a15 100644
--- a/synapse/storage/databases/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -111,7 +111,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
     async def count_daily_sent_e2ee_messages(self):
         def _count_messages(txn):
             # This is good enough as if you have silly characters in your own
-            # hostname then thats your own fault.
+            # hostname then that's your own fault.
             like_clause = "%:" + self.hs.hostname
 
             sql = """
@@ -167,7 +167,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
     async def count_daily_sent_messages(self):
         def _count_messages(txn):
             # This is good enough as if you have silly characters in your own
-            # hostname then thats your own fault.
+            # hostname then that's your own fault.
             like_clause = "%:" + self.hs.hostname
 
             sql = """
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index e4843a202c..ae9283f52d 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -160,7 +160,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
         Args:
             room_id: List of room_ids.
-            to_key: Max stream id to fetch receipts upto.
+            to_key: Max stream id to fetch receipts up to.
             from_key: Min stream id to fetch receipts from. None fetches
                 from the start.
 
@@ -189,7 +189,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
 
         Args:
             room_ids: The room id.
-            to_key: Max stream id to fetch receipts upto.
+            to_key: Max stream id to fetch receipts up to.
             from_key: Min stream id to fetch receipts from. None fetches
                 from the start.
 
@@ -312,7 +312,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
         to a limit of the latest 100 read receipts.
 
         Args:
-            to_key: Max stream id to fetch receipts upto.
+            to_key: Max stream id to fetch receipts up to.
             from_key: Min stream id to fetch receipts from. None fetches
                 from the start.
 
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index a9fcb5f59c..cba343aa68 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1044,7 +1044,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
     async def _background_add_rooms_room_version_column(
         self, progress: dict, batch_size: int
     ):
-        """Background update to go and add room version inforamtion to `rooms`
+        """Background update to go and add room version information to `rooms`
         table from `current_state_events` table.
         """
 
diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py
index 356623fc6e..0dbb501f16 100644
--- a/synapse/storage/databases/main/state_deltas.py
+++ b/synapse/storage/databases/main/state_deltas.py
@@ -64,7 +64,7 @@ class StateDeltasStore(SQLBaseStore):
         def get_current_state_deltas_txn(txn):
             # First we calculate the max stream id that will give us less than
             # N results.
-            # We arbitarily limit to 100 stream_id entries to ensure we don't
+            # We arbitrarily limit to 100 stream_id entries to ensure we don't
             # select toooo many.
             sql = """
                 SELECT stream_id, count(*)
@@ -81,7 +81,7 @@ class StateDeltasStore(SQLBaseStore):
             for stream_id, count in txn:
                 total += count
                 if total > 100:
-                    # We arbitarily limit to 100 entries to ensure we don't
+                    # We arbitrarily limit to 100 entries to ensure we don't
                     # select toooo many.
                     logger.debug(
                         "Clipping current_state_delta_stream rows to stream_id %i",
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index cea595ff19..248a6c3f25 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -198,7 +198,7 @@ class TransactionStore(TransactionWorkerStore):
         retry_interval: int,
     ) -> None:
         """Sets the current retry timings for a given destination.
-        Both timings should be zero if retrying is no longer occuring.
+        Both timings should be zero if retrying is no longer occurring.
 
         Args:
             destination
diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py
index acb24e33af..1fd333b707 100644
--- a/synapse/storage/databases/state/bg_updates.py
+++ b/synapse/storage/databases/state/bg_updates.py
@@ -27,7 +27,7 @@ MAX_STATE_DELTA_HOPS = 100
 
 
 class StateGroupBackgroundUpdateStore(SQLBaseStore):
-    """Defines functions related to state groups needed to run the state backgroud
+    """Defines functions related to state groups needed to run the state background
     updates.
     """
 
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index 28bb2eb662..cd30e6b80a 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -113,7 +113,7 @@ def prepare_database(
             # which should be empty.
            if config is None:
                 raise ValueError(
-                    "config==None in prepare_database, but databse is not empty"
+                    "config==None in prepare_database, but database is not empty"
                 )
 
             # if it's a worker app, refuse to upgrade the database, to avoid multiple
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 71ef5a72dc..9dd537bf66 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -245,7 +245,7 @@ class MultiWriterIdGenerator:
         # and b) noting that if we have seen a run of persisted positions
         # without gaps (e.g. 5, 6, 7) then we can skip forward (e.g. to 7).
         #
-        # Note: There is no guarentee that the IDs generated by the sequence
+        # Note: There is no guarantee that the IDs generated by the sequence
         # will be gapless; gaps can form when e.g. a transaction was rolled
         # back. This means that sometimes we won't be able to skip forward the
         # position even though everything has been persisted. However, since
@@ -418,7 +418,7 @@ class MultiWriterIdGenerator:
         # bother, as nothing will read it).
         #
         # We only do this on the success path so that the persisted current
-        # position points to a persited row with the correct instance name.
+        # position points to a persisted row with the correct instance name.
         if self._writers:
             txn.call_after(
                 run_as_background_process,
@@ -509,7 +509,7 @@ class MultiWriterIdGenerator:
         }
 
     def advance(self, instance_name: str, new_id: int):
-        """Advance the postion of the named writer to the given ID, if greater
+        """Advance the position of the named writer to the given ID, if greater
         than existing entry.
         """
 
```
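For context on the comment fixed in `event_federation.py` above: the subtlety it describes is that the incremental walk computing the auth chain difference must keep expanding events even after they are known to be reachable from every state set, otherwise events further up the chain (E in the example) are only discovered through one set's chain and get wrongly reported. As a point of reference, here is a minimal, unoptimised sketch of the quantity that walk has to agree with, written straight from the definition; the `AuthGraph` adjacency map and both function names are assumptions for illustration, not Synapse's actual API.

```python
from typing import Dict, List, Set

# Hypothetical adjacency map for illustration: event_id -> IDs of its auth events.
AuthGraph = Dict[str, Set[str]]


def auth_chain(event_id: str, auth_events: AuthGraph) -> Set[str]:
    """Every event reachable from `event_id` by following auth edges."""
    seen: Set[str] = set()
    stack = list(auth_events.get(event_id, set()))
    while stack:
        eid = stack.pop()
        if eid not in seen:
            seen.add(eid)
            stack.extend(auth_events.get(eid, set()))
    return seen


def auth_chain_difference(state_sets: List[Set[str]], auth_events: AuthGraph) -> Set[str]:
    """Events in the auth chain of at least one state set but not of all of them."""
    # Assumes at least one state set; each chain is the union of its events' auth chains.
    chains = [
        set().union(*(auth_chain(e, auth_events) for e in state_set))
        for state_set in state_sets
    ]
    return set.union(*chains) - set.intersection(*chains)
```

In the comment's example this definition excludes both C and E, since both appear in every per-set chain; a walk that stopped expanding C as soon as it was marked reachable by all sets would only find E through B's chain and so report it in the difference.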
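Likewise, the comment fixed in `id_generators.py` above turns on the rule that the persisted position may only skip forward over a gapless run of persisted IDs. A toy sketch of just that rule, assuming the persisted IDs are exposed as a plain set rather than Synapse's actual bookkeeping structures:

```python
from typing import Set


def advance_persisted_position(current: int, persisted: Set[int]) -> int:
    """Advance `current` while the next ID has already been persisted.

    E.g. with current=4 and persisted={5, 6, 7, 9} this returns 7: the gap at 8
    (say, a rolled-back transaction) means 9 cannot safely be exposed yet.
    """
    while current + 1 in persisted:
        current += 1
    return current
```

This is why, as the fixed comment notes, a gap left by a rolled-back transaction can stop the position from advancing even though everything actually written has been persisted.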