Diffstat (limited to 'synapse/storage/databases')
18 files changed, 232 insertions, 79 deletions
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index 00a644e8f7..1dc347f0c9 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -271,7 +271,7 @@ class DataStore(
         def get_users_paginate_txn(txn):
             filters = []
-            args = [self.hs.config.server_name]
+            args = [self.hs.config.server.server_name]
 
             # Set ordering
             order_by_column = UserSortOrder(order_by).value
@@ -356,13 +356,13 @@ def check_database_before_upgrade(cur, database_engine, config: HomeServerConfig
         return
 
     user_domain = get_domain_from_id(rows[0][0])
-    if user_domain == config.server_name:
+    if user_domain == config.server.server_name:
         return
 
     raise Exception(
         "Found users in database not native to %s!\n"
         "You cannot change a synapse server_name after it's been configured"
-        % (config.server_name,)
+        % (config.server.server_name,)
     )
diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py
index f22c1f241b..6305414e3d 100644
--- a/synapse/storage/databases/main/censor_events.py
+++ b/synapse/storage/databases/main/censor_events.py
@@ -35,7 +35,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
         super().__init__(database, db_conn, hs)
 
         if (
-            hs.config.run_background_tasks
+            hs.config.worker.run_background_tasks
             and self.hs.config.redaction_retention_period is not None
         ):
             hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000)
diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py
index 074b077bef..7a98275d92 100644
--- a/synapse/storage/databases/main/client_ips.py
+++ b/synapse/storage/databases/main/client_ips.py
@@ -355,7 +355,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
 
         self.user_ips_max_age = hs.config.user_ips_max_age
 
-        if hs.config.run_background_tasks and self.user_ips_max_age:
+        if hs.config.worker.run_background_tasks and self.user_ips_max_age:
             self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)
 
     @wrap_as_background_process("prune_old_user_ips")
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index 3816a0ca53..6464520386 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -51,7 +51,7 @@ class DeviceWorkerStore(SQLBaseStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
         super().__init__(database, db_conn, hs)
 
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             self._clock.looping_call(
                 self._prune_old_outbound_device_pokes, 60 * 60 * 1000
             )
diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py
index 86075bc55b..6daf8b8ffb 100644
--- a/synapse/storage/databases/main/directory.py
+++ b/synapse/storage/databases/main/directory.py
@@ -75,8 +75,6 @@ class DirectoryWorkerStore(SQLBaseStore):
             desc="get_aliases_for_room",
         )
 
-
-class DirectoryStore(DirectoryWorkerStore):
     async def create_room_alias_association(
         self,
         room_alias: RoomAlias,
@@ -126,6 +124,8 @@ class DirectoryStore(DirectoryWorkerStore):
                 409, "Room alias %s already exists" % room_alias.to_string()
             )
 
+
+class DirectoryStore(DirectoryWorkerStore):
     async def delete_room_alias(self, room_alias: RoomAlias) -> str:
         room_id = await self.db_pool.runInteraction(
             "delete_room_alias", self._delete_room_alias_txn, room_alias
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py
index bddf5ef192..047782eb06 100644
--- a/synapse/storage/databases/main/event_federation.py
+++ b/synapse/storage/databases/main/event_federation.py
@@ -62,7 +62,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
    def __init__(self, database: DatabasePool, db_conn, hs):
         super().__init__(database, db_conn, hs)
 
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             hs.get_clock().looping_call(
                 self._delete_old_forward_extrem_cache, 60 * 60 * 1000
             )
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py
index 55caa6bbe7..97b3e92d3f 100644
--- a/synapse/storage/databases/main/event_push_actions.py
+++ b/synapse/storage/databases/main/event_push_actions.py
@@ -82,7 +82,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
         self._rotate_delay = 3
         self._rotate_count = 10000
         self._doing_notif_rotation = False
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             self._rotate_notif_loop = self._clock.looping_call(
                 self._rotate_notifs, 30 * 60 * 1000
             )
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 40b53274fb..8e691678e5 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -575,7 +575,13 @@ class PersistEventsStore:
 
             missing_auth_chains.clear()
 
-            for auth_id, event_type, state_key, chain_id, sequence_number in txn:
+            for (
+                auth_id,
+                event_type,
+                state_key,
+                chain_id,
+                sequence_number,
+            ) in txn.fetchall():
                 event_to_types[auth_id] = (event_type, state_key)
 
                 if chain_id is None:
@@ -1379,18 +1385,18 @@ class PersistEventsStore:
         # If we're persisting an unredacted event we go and ensure
         # that we mark any redactions that reference this event as
         # requiring censoring.
-        sql = "UPDATE redactions SET have_censored = ? WHERE redacts = ?"
-        txn.execute_batch(
-            sql,
-            (
-                (
-                    False,
-                    event.event_id,
-                )
-                for event, _ in events_and_contexts
-                if not event.internal_metadata.is_redacted()
-            ),
+        unredacted_events = [
+            event.event_id
+            for event, _ in events_and_contexts
+            if not event.internal_metadata.is_redacted()
+        ]
+        sql = "UPDATE redactions SET have_censored = ? WHERE "
+        clause, args = make_in_list_sql_clause(
+            self.database_engine,
+            "redacts",
+            unredacted_events,
         )
+        txn.execute(sql + clause, [False] + args)
 
         state_events_and_contexts = [
             ec for ec in events_and_contexts if ec[0].is_state()
@@ -1541,35 +1547,32 @@ class PersistEventsStore:
         to_prefill = []
 
         rows = []
-        N = 200
-        for i in range(0, len(events_and_contexts), N):
-            ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
-            if not ev_map:
-                break
-
-            sql = (
-                "SELECT "
-                " e.event_id as event_id, "
-                " r.redacts as redacts,"
-                " rej.event_id as rejects "
-                " FROM events as e"
-                " LEFT JOIN rejections as rej USING (event_id)"
-                " LEFT JOIN redactions as r ON e.event_id = r.redacts"
-                " WHERE "
-            )
-            clause, args = make_in_list_sql_clause(
-                self.database_engine, "e.event_id", list(ev_map)
-            )
+        ev_map = {e.event_id: e for e, _ in events_and_contexts}
+        if not ev_map:
+            return
-            txn.execute(sql + clause, args)
-            rows = self.db_pool.cursor_to_dict(txn)
-            for row in rows:
-                event = ev_map[row["event_id"]]
-                if not row["rejects"] and not row["redacts"]:
-                    to_prefill.append(
-                        _EventCacheEntry(event=event, redacted_event=None)
-                    )
+        sql = (
+            "SELECT "
+            " e.event_id as event_id, "
+            " r.redacts as redacts,"
+            " rej.event_id as rejects "
+            " FROM events as e"
+            " LEFT JOIN rejections as rej USING (event_id)"
+            " LEFT JOIN redactions as r ON e.event_id = r.redacts"
+            " WHERE "
+        )
+
+        clause, args = make_in_list_sql_clause(
+            self.database_engine, "e.event_id", list(ev_map)
+        )
+
+        txn.execute(sql + clause, args)
+        rows = self.db_pool.cursor_to_dict(txn)
+        for row in rows:
+            event = ev_map[row["event_id"]]
+            if not row["rejects"] and not row["redacts"]:
+                to_prefill.append(_EventCacheEntry(event=event, redacted_event=None))
 
         def prefill():
             for cache_entry in to_prefill:
@@ -1770,10 +1773,21 @@ class PersistEventsStore:
             # Not a insertion event
             return
 
-        # Skip processing a insertion event if the room version doesn't
-        # support it.
+        # Skip processing an insertion event if the room version doesn't
+        # support it or the event is not from the room creator.
         room_version = self.store.get_room_version_txn(txn, event.room_id)
-        if not room_version.msc2716_historical:
+        room_creator = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="rooms",
+            keyvalues={"room_id": event.room_id},
+            retcol="creator",
+            allow_none=True,
+        )
+        if (
+            not room_version.msc2716_historical
+            or not self.hs.config.experimental.msc2716_enabled
+            or event.sender != room_creator
+        ):
             return
 
         next_chunk_id = event.content.get(EventContentFields.MSC2716_NEXT_CHUNK_ID)
@@ -1822,9 +1836,20 @@ class PersistEventsStore:
             return
 
         # Skip processing a chunk event if the room version doesn't
-        # support it.
+        # support it or the event is not from the room creator.
         room_version = self.store.get_room_version_txn(txn, event.room_id)
-        if not room_version.msc2716_historical:
+        room_creator = self.db_pool.simple_select_one_onecol_txn(
+            txn,
+            table="rooms",
+            keyvalues={"room_id": event.room_id},
+            retcol="creator",
+            allow_none=True,
+        )
+        if (
+            not room_version.msc2716_historical
+            or not self.hs.config.experimental.msc2716_enabled
+            or event.sender != room_creator
+        ):
             return
 
         chunk_id = event.content.get(EventContentFields.MSC2716_CHUNK_ID)
@@ -1962,6 +1987,15 @@ class PersistEventsStore:
             events_and_context.
         """
 
+        # Only non outlier events will have push actions associated with them,
+        # so let's filter them out. (This makes joining large rooms faster, as
+        # these queries took seconds to process all the state events).
+        non_outlier_events = [
+            event
+            for event, _ in events_and_contexts
+            if not event.internal_metadata.is_outlier()
+        ]
+
         sql = """
             INSERT INTO event_push_actions (
                 room_id, event_id, user_id, actions, stream_ordering,
@@ -1972,7 +2006,7 @@ class PersistEventsStore:
             WHERE event_id = ?
         """
 
-        if events_and_contexts:
+        if non_outlier_events:
             txn.execute_batch(
                 sql,
                 (
@@ -1982,12 +2016,12 @@ class PersistEventsStore:
                         event.depth,
                         event.event_id,
                     )
-                    for event, _ in events_and_contexts
+                    for event in non_outlier_events
                 ),
             )
 
         room_to_event_ids: Dict[str, List[str]] = {}
-        for e, _ in events_and_contexts:
+        for e in non_outlier_events:
             room_to_event_ids.setdefault(e.room_id, []).append(e.event_id)
 
         for room_id, event_ids in room_to_event_ids.items():
@@ -2012,7 +2046,11 @@ class PersistEventsStore:
             # persisted.
             txn.execute_batch(
                 "DELETE FROM event_push_actions_staging WHERE event_id = ?",
-                ((event.event_id,) for event, _ in all_events_and_contexts),
+                (
+                    (event.event_id,)
+                    for event, _ in all_events_and_contexts
+                    if not event.internal_metadata.is_outlier()
+                ),
             )
 
     def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py
index 9501f00f3b..d72e716b5c 100644
--- a/synapse/storage/databases/main/events_worker.py
+++ b/synapse/storage/databases/main/events_worker.py
@@ -158,7 +158,7 @@ class EventsWorkerStore(SQLBaseStore):
             db_conn, "events", "stream_ordering", step=-1
         )
 
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             # We periodically clean out old transaction ID mappings
             self._clock.looping_call(
                 self._cleanup_old_transaction_ids,
diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py
index dc0bbc56ac..dac3d14da8 100644
--- a/synapse/storage/databases/main/metrics.py
+++ b/synapse/storage/databases/main/metrics.py
@@ -56,7 +56,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
         super().__init__(database, db_conn, hs)
 
         # Read the extrems every 60 minutes
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             self._clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000)
 
         # Used in _generate_user_daily_visits to keep track of progress
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index 1388771c40..12cf6995eb 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -29,7 +29,26 @@ if TYPE_CHECKING:
     from synapse.server import HomeServer
 
 
-class PresenceStore(SQLBaseStore):
+class PresenceBackgroundUpdateStore(SQLBaseStore):
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: Connection,
+        hs: "HomeServer",
+    ):
+        super().__init__(database, db_conn, hs)
+
+        # Used by `PresenceStore._get_active_presence()`
+        self.db_pool.updates.register_background_index_update(
+            "presence_stream_not_offline_index",
+            index_name="presence_stream_state_not_offline_idx",
+            table="presence_stream",
+            columns=["state"],
+            where_clause="state != 'offline'",
+        )
+
+
+class PresenceStore(PresenceBackgroundUpdateStore):
     def __init__(
         self,
         database: DatabasePool,
@@ -332,6 +351,8 @@ class PresenceStore(SQLBaseStore):
         the appropriate time outs.
         """
 
+        # The `presence_stream_state_not_offline_idx` index should be used for this
+        # query.
         sql = (
             "SELECT user_id, state, last_active_ts, last_federation_update_ts,"
             " last_user_sync_ts, status_msg, currently_active FROM presence_stream"
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 1fe8a255bd..713afb039d 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -132,14 +132,14 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
             hs.config.account_validity.account_validity_startup_job_max_delta
         )
 
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             self._clock.call_later(
                 0.0,
                 self._set_expiration_date_when_missing,
             )
 
         # Create a background job for culling expired 3PID validity tokens
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             self._clock.looping_call(
                 self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS
             )
@@ -1171,6 +1171,7 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
             delta equal to 10% of the validity period.
         """
         now_ms = self._clock.time_msec()
+        assert self._account_validity_period is not None
         expiration_ts = now_ms + self._account_validity_period
 
         if use_delta:
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 10ef734d8d..6129148843 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -18,9 +18,10 @@ from abc import abstractmethod
 from enum import Enum
 from typing import Any, Dict, List, Optional, Tuple
 
-from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.constants import EventContentFields, EventTypes, JoinRules
 from synapse.api.errors import StoreError
 from synapse.api.room_versions import RoomVersion, RoomVersions
+from synapse.events import EventBase
 from synapse.storage._base import SQLBaseStore, db_to_json
 from synapse.storage.database import DatabasePool, LoggingTransaction
 from synapse.storage.databases.main.search import SearchStore
@@ -835,7 +836,7 @@ class RoomWorkerStore(SQLBaseStore):
             If it is `None` media will be removed from quarantine
         """
         logger.info("Quarantining media: %s/%s", server_name, media_id)
-        is_local = server_name == self.config.server_name
+        is_local = server_name == self.config.server.server_name
 
         def _quarantine_media_by_id_txn(txn):
             local_mxcs = [media_id] if is_local else []
@@ -1034,6 +1035,7 @@ class _BackgroundUpdates:
     ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column"
     POPULATE_ROOM_DEPTH_MIN_DEPTH2 = "populate_room_depth_min_depth2"
     REPLACE_ROOM_DEPTH_MIN_DEPTH = "replace_room_depth_min_depth"
+    POPULATE_ROOMS_CREATOR_COLUMN = "populate_rooms_creator_column"
 
 
 _REPLACE_ROOM_DEPTH_SQL_COMMANDS = (
@@ -1075,6 +1077,11 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             self._background_replace_room_depth_min_depth,
         )
 
+        self.db_pool.updates.register_background_update_handler(
+            _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
+            self._background_populate_rooms_creator_column,
+        )
+
     async def _background_insert_retention(self, progress, batch_size):
         """Retrieves a list of all rooms within a range and inserts an entry
         for each of them into the room_retention table.
@@ -1294,7 +1301,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
             keyvalues={"room_id": room_id},
             retcol="MAX(stream_ordering)",
             allow_none=True,
-            desc="upsert_room_on_join",
+            desc="has_auth_chain_index_fallback",
         )
 
         return max_ordering is None
@@ -1364,6 +1371,65 @@ class RoomBackgroundUpdateStore(SQLBaseStore):
 
         return 0
 
+    async def _background_populate_rooms_creator_column(
+        self, progress: dict, batch_size: int
+    ):
+        """Background update to go and add creator information to `rooms`
+        table from `current_state_events` table.
+        """
+
+        last_room_id = progress.get("room_id", "")
+
+        def _background_populate_rooms_creator_column_txn(txn: LoggingTransaction):
+            sql = """
+                SELECT room_id, json FROM event_json
+                INNER JOIN rooms AS room USING (room_id)
+                INNER JOIN current_state_events AS state_event USING (room_id, event_id)
+                WHERE room_id > ? AND (room.creator IS NULL OR room.creator = '') AND state_event.type = 'm.room.create' AND state_event.state_key = ''
+                ORDER BY room_id
+                LIMIT ?
+            """
+
+            txn.execute(sql, (last_room_id, batch_size))
+            room_id_to_create_event_results = txn.fetchall()
+
+            new_last_room_id = ""
+            for room_id, event_json in room_id_to_create_event_results:
+                event_dict = db_to_json(event_json)
+
+                creator = event_dict.get("content").get(EventContentFields.ROOM_CREATOR)
+
+                self.db_pool.simple_update_txn(
+                    txn,
+                    table="rooms",
+                    keyvalues={"room_id": room_id},
+                    updatevalues={"creator": creator},
+                )
+                new_last_room_id = room_id
+
+            if new_last_room_id == "":
+                return True
+
+            self.db_pool.updates._background_update_progress_txn(
+                txn,
+                _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
+                {"room_id": new_last_room_id},
+            )
+
+            return False
+
+        end = await self.db_pool.runInteraction(
+            "_background_populate_rooms_creator_column",
+            _background_populate_rooms_creator_column_txn,
+        )
+
+        if end:
+            await self.db_pool.updates._end_background_update(
+                _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN
+            )
+
+        return batch_size
+
 
 class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
@@ -1371,7 +1437,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
 
         self.config = hs.config
 
-    async def upsert_room_on_join(self, room_id: str, room_version: RoomVersion):
+    async def upsert_room_on_join(
+        self, room_id: str, room_version: RoomVersion, auth_events: List[EventBase]
+    ):
         """Ensure that the room is stored in the table
 
         Called when we join a room over federation, and overwrites any room version
@@ -1382,6 +1450,24 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
         # mark the room as having an auth chain cover index.
         has_auth_chain_index = await self.has_auth_chain_index(room_id)
 
+        create_event = None
+        for e in auth_events:
+            if (e.type, e.state_key) == (EventTypes.Create, ""):
+                create_event = e
+                break
+
+        if create_event is None:
+            # If the state doesn't have a create event then the room is
+            # invalid, and it would fail auth checks anyway.
+            raise StoreError(400, "No create event in state")
+
+        room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+
+        if not isinstance(room_creator, str):
+            # If the create event does not have a creator then the room is
+            # invalid, and it would fail auth checks anyway.
+            raise StoreError(400, "No creator defined on the create event")
+
         await self.db_pool.simple_upsert(
             desc="upsert_room_on_join",
             table="rooms",
@@ -1389,7 +1475,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             values={"room_version": room_version.identifier},
             insertion_values={
                 "is_public": False,
-                "creator": "",
+                "creator": room_creator,
                 "has_auth_chain_index": has_auth_chain_index,
             },
             # rooms has a unique constraint on room_id, so no need to lock when doing an
@@ -1417,6 +1503,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore):
             insertion_values={
                 "room_version": room_version.identifier,
                 "is_public": False,
+                # We don't worry about setting the `creator` here because
+                # we don't process any messages in a room while a user is
+                # invited (only after the join).
                 "creator": "",
                 "has_auth_chain_index": has_auth_chain_index,
             },
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index c58a4b8690..9beeb96aa9 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -81,7 +81,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
             txn.close()
 
         if (
-            self.hs.config.run_background_tasks
+            self.hs.config.worker.run_background_tasks
             and self.hs.config.metrics_flags.known_servers
         ):
             self._known_servers_count = 1
@@ -196,6 +196,11 @@ class RoomMemberWorkerStore(EventsWorkerStore):
     ) -> Dict[str, ProfileInfo]:
         """Get a mapping from user ID to profile information for all users in a given room.
 
+        The profile information comes directly from this room's `m.room.member`
+        events, and so may be specific to this room rather than part of a user's
+        global profile. To avoid privacy leaks, the profile data should only be
+        revealed to users who are already in this room.
+
         Args:
             room_id: The ID of the room to retrieve the users of.
diff --git a/synapse/storage/databases/main/session.py b/synapse/storage/databases/main/session.py
index 172f27d109..5a97120437 100644
--- a/synapse/storage/databases/main/session.py
+++ b/synapse/storage/databases/main/session.py
@@ -48,7 +48,7 @@ class SessionStore(SQLBaseStore):
         super().__init__(database, db_conn, hs)
 
         # Create a background job for culling expired sessions.
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             self._clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000)
 
     async def create_session(
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index 42edbcc057..343d6efc92 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -22,7 +22,7 @@ from typing_extensions import Counter
 
 from twisted.internet.defer import DeferredLock
 
-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import EventContentFields, EventTypes, Membership
 from synapse.api.errors import StoreError
 from synapse.storage.database import DatabasePool
 from synapse.storage.databases.main.state_deltas import StateDeltasStore
@@ -590,7 +590,7 @@ class StatsStore(StateDeltasStore):
                     room_state["canonical_alias"] = event.content.get("alias")
                 elif event.type == EventTypes.Create:
                     room_state["is_federatable"] = (
-                        event.content.get("m.federate", True) is True
+                        event.content.get(EventContentFields.FEDERATE, True) is True
                     )
 
         await self.update_room_state(room_id, room_state)
@@ -672,7 +672,7 @@ class StatsStore(StateDeltasStore):
         def get_users_media_usage_paginate_txn(txn):
             filters = []
-            args = [self.hs.config.server_name]
+            args = [self.hs.config.server.server_name]
 
             if search_term:
                 filters.append("(lmr.user_id LIKE ? OR displayname LIKE ?)")
diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py
index 7728d5f102..860146cd1b 100644
--- a/synapse/storage/databases/main/transactions.py
+++ b/synapse/storage/databases/main/transactions.py
@@ -60,7 +60,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
         super().__init__(database, db_conn, hs)
 
-        if hs.config.run_background_tasks:
+        if hs.config.worker.run_background_tasks:
             self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
 
     @wrap_as_background_process("cleanup_transactions")
diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py
index 65dde67ae9..8aebdc2817 100644
--- a/synapse/storage/databases/main/user_directory.py
+++ b/synapse/storage/databases/main/user_directory.py
@@ -196,7 +196,6 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         )
 
         users_with_profile = await self.get_users_in_room_with_profiles(room_id)
-        user_ids = set(users_with_profile)
 
         # Update each user in the user directory.
         for user_id, profile in users_with_profile.items():
@@ -207,7 +206,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
         to_insert = set()
 
         if is_public:
-            for user_id in user_ids:
+            for user_id in users_with_profile:
                 if self.get_if_app_services_interested_in_user(user_id):
                     continue
 
@@ -217,14 +216,14 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             await self.add_users_in_public_rooms(room_id, to_insert)
             to_insert.clear()
         else:
-            for user_id in user_ids:
+            for user_id in users_with_profile:
                 if not self.hs.is_mine_id(user_id):
                     continue
 
                 if self.get_if_app_services_interested_in_user(user_id):
                     continue
 
-                for other_user_id in user_ids:
+                for other_user_id in users_with_profile:
                     if user_id == other_user_id:
                         continue
 
@@ -511,7 +510,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
         self._prefer_local_users_in_search = (
             hs.config.user_directory_search_prefer_local_users
         )
-        self._server_name = hs.config.server_name
+        self._server_name = hs.config.server.server_name
 
     async def remove_from_user_dir(self, user_id: str) -> None:
         def _remove_from_user_dir_txn(txn):
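
Two of the changes above are worth illustrating outside the diff. First, the redactions change in events.py replaces a per-event "UPDATE ... WHERE redacts = ?" issued through execute_batch with a single UPDATE over an IN list built by make_in_list_sql_clause. Below is a minimal, self-contained sketch of that pattern against SQLite; the make_in_list_sql_clause stub here is a simplified stand-in (the real Synapse helper also takes the database engine and can emit "= ANY(?)" on Postgres), not Synapse's actual implementation.

```python
import sqlite3
from typing import Iterable, List, Tuple


def make_in_list_sql_clause(column: str, values: Iterable[str]) -> Tuple[str, List[str]]:
    """Simplified stand-in for Synapse's helper: build a '<col> IN (?, ?, ...)'
    clause plus its argument list. Assumes a non-empty value list."""
    values = list(values)
    placeholders = ", ".join("?" for _ in values)
    return f"{column} IN ({placeholders})", values


conn = sqlite3.connect(":memory:")
txn = conn.cursor()
txn.execute("CREATE TABLE redactions (redacts TEXT, have_censored BOOLEAN)")
txn.executemany(
    "INSERT INTO redactions VALUES (?, ?)",
    [("$event1", True), ("$event2", True), ("$event3", True)],
)

# One statement covering all unredacted events, instead of one UPDATE per event
# as the old execute_batch loop effectively did.
unredacted_events = ["$event1", "$event3"]
sql = "UPDATE redactions SET have_censored = ? WHERE "
clause, args = make_in_list_sql_clause("redacts", unredacted_events)
txn.execute(sql + clause, [False] + args)

print(txn.execute("SELECT * FROM redactions ORDER BY redacts").fetchall())
# [('$event1', 0), ('$event2', 1), ('$event3', 0)]
```

The payoff is one round trip and one statement for the whole batch rather than one per event, which matters when persisting many events at once.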
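Second, the new _background_populate_rooms_creator_column update in room.py follows the usual Synapse background-update shape: fetch one batch of rows keyed after the last processed room_id, record that key as progress, and report completion once a batch comes back empty. A rough standalone sketch of that pagination loop follows, with a plain SQLite table and an in-memory progress dict standing in for Synapse's rooms table and background-update machinery (the names and helper shapes here are illustrative, not Synapse's actual API).

```python
import sqlite3

BATCH_SIZE = 2  # deliberately tiny so the loop runs several batches

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE rooms (room_id TEXT PRIMARY KEY, creator TEXT)")
conn.executemany(
    "INSERT INTO rooms VALUES (?, ?)",
    [("!a", None), ("!b", None), ("!c", None)],
)

progress = {}  # Synapse persists this between batches; we keep it in memory.


def run_one_batch(progress: dict) -> bool:
    """Process one batch; return True when there is nothing left to do."""
    last_room_id = progress.get("room_id", "")
    rows = conn.execute(
        "SELECT room_id FROM rooms WHERE room_id > ? AND creator IS NULL "
        "ORDER BY room_id LIMIT ?",
        (last_room_id, BATCH_SIZE),
    ).fetchall()

    if not rows:
        return True  # empty batch: the background update is finished

    for (room_id,) in rows:
        # Stand-in for extracting the creator from the m.room.create event.
        conn.execute(
            "UPDATE rooms SET creator = ? WHERE room_id = ?",
            ("@admin:example.com", room_id),
        )

    # Record how far we got, as _background_update_progress_txn does.
    progress["room_id"] = rows[-1][0]
    return False


while not run_one_batch(progress):
    pass
print(conn.execute("SELECT * FROM rooms").fetchall())
```

Keying each batch on the previous batch's last room_id keeps the work resumable: if the server restarts mid-update, the stored progress dict lets the next run pick up exactly where the last one stopped.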