diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index da3b99f93d..13de5f1f62 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -56,7 +56,7 @@ class SQLBaseStore(metaclass=ABCMeta):
members_changed (iterable[str]): The user_ids of members that have
changed
"""
- for host in set(get_domain_from_id(u) for u in members_changed):
+ for host in {get_domain_from_id(u) for u in members_changed}:
self._attempt_to_invalidate_cache("is_host_joined", (room_id, host))
self._attempt_to_invalidate_cache("was_host_joined", (room_id, host))
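
Note: the change above is purely stylistic. A set comprehension builds the same set as passing a generator expression to set(), but drops the extra name lookup and call, and is the rewrite that linters such as flake8-comprehensions flag (rule C401). A minimal standalone sketch of the pattern, using illustrative names rather than anything from this patch:

    def hosts_from_users(user_ids):
        """Illustrative helper: derive the set of homeserver names from full user IDs."""
        # Equivalent, but routes a generator through an extra set() call:
        #   return set(u.split(":", 1)[1] for u in user_ids)
        return {u.split(":", 1)[1] for u in user_ids}

    if __name__ == "__main__":
        print(hosts_from_users(["@alice:example.com", "@bob:example.org", "@carol:example.com"]))
        # -> {'example.com', 'example.org'} (set order is not defined)
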
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index bd547f35cf..eb1a7e5002 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -189,7 +189,7 @@ class BackgroundUpdater(object):
keyvalues=None,
retcols=("update_name", "depends_on"),
)
- in_flight = set(update["update_name"] for update in updates)
+ in_flight = {update["update_name"] for update in updates}
for update in updates:
if update["depends_on"] not in in_flight:
self._background_update_queue.append(update["update_name"])
diff --git a/synapse/storage/data_stores/main/appservice.py b/synapse/storage/data_stores/main/appservice.py
index b2f39649fd..efbc06c796 100644
--- a/synapse/storage/data_stores/main/appservice.py
+++ b/synapse/storage/data_stores/main/appservice.py
@@ -135,7 +135,7 @@ class ApplicationServiceTransactionWorkerStore(
may be empty.
"""
results = yield self.db.simple_select_list(
- "application_services_state", dict(state=state), ["as_id"]
+ "application_services_state", {"state": state}, ["as_id"]
)
# NB: This assumes this class is linked with ApplicationServiceStore
as_list = self.get_app_services()
@@ -158,7 +158,7 @@ class ApplicationServiceTransactionWorkerStore(
"""
result = yield self.db.simple_select_one(
"application_services_state",
- dict(as_id=service.id),
+ {"as_id": service.id},
["state"],
allow_none=True,
desc="get_appservice_state",
@@ -177,7 +177,7 @@ class ApplicationServiceTransactionWorkerStore(
A Deferred which resolves when the state was set successfully.
"""
return self.db.simple_upsert(
- "application_services_state", dict(as_id=service.id), dict(state=state)
+ "application_services_state", {"as_id": service.id}, {"state": state}
)
def create_appservice_txn(self, service, events):
@@ -253,13 +253,15 @@ class ApplicationServiceTransactionWorkerStore(
self.db.simple_upsert_txn(
txn,
"application_services_state",
- dict(as_id=service.id),
- dict(last_txn=txn_id),
+ {"as_id": service.id},
+ {"last_txn": txn_id},
)
# Delete txn
self.db.simple_delete_txn(
- txn, "application_services_txns", dict(txn_id=txn_id, as_id=service.id)
+ txn,
+ "application_services_txns",
+ {"txn_id": txn_id, "as_id": service.id},
)
return self.db.runInteraction(
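
The dict(...) replacements in this file follow the same reasoning: dict(as_id=service.id) and the literal {"as_id": service.id} produce equal dictionaries, but the literal avoids the function call and, unlike keyword arguments, also works for keys that are not valid Python identifiers. A small sketch, independent of the Synapse storage API:

    # Keyword-argument form: keys must be valid Python identifiers.
    keyvalues_kw = dict(as_id="@svc:example.com", state="up")

    # Literal form: same mapping, no call overhead, arbitrary string keys allowed.
    keyvalues_lit = {"as_id": "@svc:example.com", "state": "up"}

    assert keyvalues_kw == keyvalues_lit
    # Something like dict("m.room.member"=1) is a SyntaxError,
    # whereas {"m.room.member": 1} is fine.
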
diff --git a/synapse/storage/data_stores/main/client_ips.py b/synapse/storage/data_stores/main/client_ips.py
index 13f4c9c72e..e1ccb27142 100644
--- a/synapse/storage/data_stores/main/client_ips.py
+++ b/synapse/storage/data_stores/main/client_ips.py
@@ -530,7 +530,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"]))
for row in rows
)
- return list(
+ return [
{
"access_token": access_token,
"ip": ip,
@@ -538,7 +538,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore):
"last_seen": last_seen,
}
for (access_token, ip), (user_agent, last_seen) in iteritems(results)
- )
+ ]
@wrap_as_background_process("prune_old_user_ips")
async def _prune_old_user_ips(self):
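
As with the set cases, wrapping a generator in list(...) is redundant; a list comprehension expresses the same thing directly. A rough standalone sketch of the shape used in the hunk above (field names and values are only illustrative, not taken from the patch):

    results = {
        ("token-a", "10.0.0.1"): ("Mozilla/5.0", 1_583_000_000_000),
        ("token-b", "10.0.0.2"): ("curl/7.68", 1_583_000_100_000),
    }

    # list(... for ...) would build the same list; the comprehension skips the extra call.
    sessions = [
        {"access_token": token, "ip": ip, "user_agent": ua, "last_seen": seen}
        for (token, ip), (ua, seen) in results.items()
    ]

    print(sessions)
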
diff --git a/synapse/storage/data_stores/main/devices.py b/synapse/storage/data_stores/main/devices.py
index b7617efb80..d55733a4cd 100644
--- a/synapse/storage/data_stores/main/devices.py
+++ b/synapse/storage/data_stores/main/devices.py
@@ -137,7 +137,7 @@ class DeviceWorkerStore(SQLBaseStore):
# get the cross-signing keys of the users in the list, so that we can
# determine which of the device changes were cross-signing keys
- users = set(r[0] for r in updates)
+ users = {r[0] for r in updates}
master_key_by_user = {}
self_signing_key_by_user = {}
for user in users:
@@ -446,7 +446,7 @@ class DeviceWorkerStore(SQLBaseStore):
a set of user_ids and results_map is a mapping of
user_id -> device_id -> device_info
"""
- user_ids = set(user_id for user_id, _ in query_list)
+ user_ids = {user_id for user_id, _ in query_list}
user_map = yield self.get_device_list_last_stream_id_for_remotes(list(user_ids))
# We go and check if any of the users need to have their device lists
@@ -454,10 +454,9 @@ class DeviceWorkerStore(SQLBaseStore):
users_needing_resync = yield self.get_user_ids_requiring_device_list_resync(
user_ids
)
- user_ids_in_cache = (
- set(user_id for user_id, stream_id in user_map.items() if stream_id)
- - users_needing_resync
- )
+ user_ids_in_cache = {
+ user_id for user_id, stream_id in user_map.items() if stream_id
+ } - users_needing_resync
user_ids_not_in_cache = user_ids - user_ids_in_cache
results = {}
@@ -604,7 +603,7 @@ class DeviceWorkerStore(SQLBaseStore):
rows = yield self.db.execute(
"get_users_whose_signatures_changed", None, sql, user_id, from_key
)
- return set(user for row in rows for user in json.loads(row[0]))
+ return {user for row in rows for user in json.loads(row[0])}
else:
return set()
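
The devices.py hunks also show that a comprehension composes naturally with set operators: the braces make it obvious the intermediate value is a set, so subtracting another set reads as plain set algebra. A small sketch with made-up data:

    user_map = {"@a:hs": 5, "@b:hs": None, "@c:hs": 7, "@d:hs": 2}
    users_needing_resync = {"@c:hs"}

    # Users with a cached stream id, minus those whose device lists must be re-fetched.
    user_ids_in_cache = {
        user_id for user_id, stream_id in user_map.items() if stream_id
    } - users_needing_resync

    user_ids_not_in_cache = set(user_map) - user_ids_in_cache
    print(user_ids_in_cache)      # {'@a:hs', '@d:hs'}
    print(user_ids_not_in_cache)  # {'@b:hs', '@c:hs'}
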
diff --git a/synapse/storage/data_stores/main/event_federation.py b/synapse/storage/data_stores/main/event_federation.py
index 750ec1b70d..49a7b8b433 100644
--- a/synapse/storage/data_stores/main/event_federation.py
+++ b/synapse/storage/data_stores/main/event_federation.py
@@ -426,7 +426,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
query, (room_id, event_id, False, limit - len(event_results))
)
- new_results = set(t[0] for t in txn) - seen_events
+ new_results = {t[0] for t in txn} - seen_events
new_front |= new_results
seen_events |= new_results
diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py
index c9d0d68c3a..8ae23df00a 100644
--- a/synapse/storage/data_stores/main/events.py
+++ b/synapse/storage/data_stores/main/events.py
@@ -145,7 +145,7 @@ class EventsStore(
return txn.fetchall()
res = yield self.db.runInteraction("read_forward_extremities", fetch)
- self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
+ self._current_forward_extremities_amount = c_counter([x[0] for x in res])
@_retry_on_integrity_error
@defer.inlineCallbacks
@@ -598,11 +598,11 @@ class EventsStore(
# We find out which membership events we may have deleted
# and which we have added, then we invlidate the caches for all
# those users.
- members_changed = set(
+ members_changed = {
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
- )
+ }
for member in members_changed:
txn.call_after(
@@ -1615,7 +1615,7 @@ class EventsStore(
"""
)
- referenced_state_groups = set(sg for sg, in txn)
+ referenced_state_groups = {sg for sg, in txn}
logger.info(
"[purge] found %i referenced state groups", len(referenced_state_groups)
)
diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index 5177b71016..f54c8b1ee0 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -402,7 +402,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore):
keyvalues={},
retcols=("room_id",),
)
- room_ids = set(row["room_id"] for row in rows)
+ room_ids = {row["room_id"] for row in rows}
for room_id in room_ids:
txn.call_after(
self.get_latest_event_ids_in_room.invalidate, (room_id,)
diff --git a/synapse/storage/data_stores/main/events_worker.py b/synapse/storage/data_stores/main/events_worker.py
index 7251e819f5..47a3a26072 100644
--- a/synapse/storage/data_stores/main/events_worker.py
+++ b/synapse/storage/data_stores/main/events_worker.py
@@ -494,9 +494,9 @@ class EventsWorkerStore(SQLBaseStore):
"""
with Measure(self._clock, "_fetch_event_list"):
try:
- events_to_fetch = set(
+ events_to_fetch = {
event_id for events, _ in event_list for event_id in events
- )
+ }
row_dict = self.db.new_transaction(
conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch
@@ -804,7 +804,7 @@ class EventsWorkerStore(SQLBaseStore):
desc="have_events_in_timeline",
)
- return set(r["event_id"] for r in rows)
+ return {r["event_id"] for r in rows}
@defer.inlineCallbacks
def have_seen_events(self, event_ids):
diff --git a/synapse/storage/data_stores/main/push_rule.py b/synapse/storage/data_stores/main/push_rule.py
index e2673ae073..62ac88d9f2 100644
--- a/synapse/storage/data_stores/main/push_rule.py
+++ b/synapse/storage/data_stores/main/push_rule.py
@@ -276,21 +276,21 @@ class PushRulesWorkerStore(
# We ignore app service users for now. This is so that we don't fill
# up the `get_if_users_have_pushers` cache with AS entries that we
# know don't have pushers, nor even read receipts.
- local_users_in_room = set(
+ local_users_in_room = {
u
for u in users_in_room
if self.hs.is_mine_id(u)
and not self.get_if_app_services_interested_in_user(u)
- )
+ }
# users in the room who have pushers need to get push rules run because
# that's how their pushers work
if_users_with_pushers = yield self.get_if_users_have_pushers(
local_users_in_room, on_invalidate=cache_context.invalidate
)
- user_ids = set(
+ user_ids = {
uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
- )
+ }
users_with_receipts = yield self.get_users_with_read_receipts_in_room(
room_id, on_invalidate=cache_context.invalidate
diff --git a/synapse/storage/data_stores/main/receipts.py b/synapse/storage/data_stores/main/receipts.py
index 96e54d145e..0d932a0672 100644
--- a/synapse/storage/data_stores/main/receipts.py
+++ b/synapse/storage/data_stores/main/receipts.py
@@ -58,7 +58,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
@cachedInlineCallbacks()
def get_users_with_read_receipts_in_room(self, room_id):
receipts = yield self.get_receipts_for_room(room_id, "m.read")
- return set(r["user_id"] for r in receipts)
+ return {r["user_id"] for r in receipts}
@cached(num_args=2)
def get_receipts_for_room(self, room_id, receipt_type):
@@ -283,7 +283,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
args.append(limit)
txn.execute(sql, args)
- return list(r[0:5] + (json.loads(r[5]),) for r in txn)
+ return [r[0:5] + (json.loads(r[5]),) for r in txn]
return self.db.runInteraction(
"get_all_updated_receipts", get_all_updated_receipts_txn
diff --git a/synapse/storage/data_stores/main/roommember.py b/synapse/storage/data_stores/main/roommember.py
index d5ced05701..d5bd0cb5cf 100644
--- a/synapse/storage/data_stores/main/roommember.py
+++ b/synapse/storage/data_stores/main/roommember.py
@@ -465,7 +465,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
txn.execute(sql % (clause,), args)
- return set(row[0] for row in txn)
+ return {row[0] for row in txn}
return await self.db.runInteraction(
"get_users_server_still_shares_room_with",
@@ -826,7 +826,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
GROUP BY room_id, user_id;
"""
txn.execute(sql, (user_id,))
- return set(row[0] for row in txn if row[1] == 0)
+ return {row[0] for row in txn if row[1] == 0}
return self.db.runInteraction(
"get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 3d34103e67..3a3b9a8e72 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -321,7 +321,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
desc="get_referenced_state_groups",
)
- return set(row["state_group"] for row in rows)
+ return {row["state_group"] for row in rows}
class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
@@ -367,7 +367,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
"""
txn.execute(sql, (last_room_id, batch_size))
- room_ids = list(row[0] for row in txn)
+ room_ids = [row[0] for row in txn]
if not room_ids:
return True, set()
@@ -384,7 +384,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
txn.execute(sql, (last_room_id, room_ids[-1], "%:" + self.server_name))
- joined_room_ids = set(row[0] for row in txn)
+ joined_room_ids = {row[0] for row in txn}
left_rooms = set(room_ids) - joined_room_ids
@@ -404,7 +404,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore):
retcols=("state_key",),
)
- potentially_left_users = set(row["state_key"] for row in rows)
+ potentially_left_users = {row["state_key"] for row in rows}
# Now lets actually delete the rooms from the DB.
self.db.simple_delete_many_txn(
diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 056b25b13a..ada5cce6c2 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -346,11 +346,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
from_key (str): The room_key portion of a StreamToken
"""
from_key = RoomStreamToken.parse_stream_token(from_key).stream
- return set(
+ return {
room_id
for room_id in room_ids
if self._events_stream_cache.has_entity_changed(room_id, from_key)
- )
+ }
@defer.inlineCallbacks
def get_room_events_stream_for_room(
@@ -679,11 +679,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
)
events_before = yield self.get_events_as_list(
- [e for e in results["before"]["event_ids"]], get_prev_content=True
+ list(results["before"]["event_ids"]), get_prev_content=True
)
events_after = yield self.get_events_as_list(
- [e for e in results["after"]["event_ids"]], get_prev_content=True
+ list(results["after"]["event_ids"]), get_prev_content=True
)
return {
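
The stream.py change above is the inverse case: a comprehension that does nothing but iterate, [e for e in xs], is just a verbose copy, so plain list(xs) states the intent directly (the copy itself is kept). A trivial sketch:

    event_ids = ("$ev1", "$ev2", "$ev3")  # e.g. a tuple pulled out of a result dict

    # Equivalent; the second form says "make a list copy" without the loop noise.
    copy_a = [e for e in event_ids]
    copy_b = list(event_ids)
    assert copy_a == copy_b == ["$ev1", "$ev2", "$ev3"]
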
diff --git a/synapse/storage/data_stores/main/user_erasure_store.py b/synapse/storage/data_stores/main/user_erasure_store.py
index af8025bc17..ec6b8a4ffd 100644
--- a/synapse/storage/data_stores/main/user_erasure_store.py
+++ b/synapse/storage/data_stores/main/user_erasure_store.py
@@ -63,9 +63,9 @@ class UserErasureWorkerStore(SQLBaseStore):
retcols=("user_id",),
desc="are_users_erased",
)
- erased_users = set(row["user_id"] for row in rows)
+ erased_users = {row["user_id"] for row in rows}
- res = dict((u, u in erased_users) for u in user_ids)
+ res = {u: u in erased_users for u in user_ids}
return res
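
The dict((u, u in erased_users) for u in user_ids) form is the dictionary analogue of the set cases: a dict comprehension drops both the tuple packing and the dict() call. A minimal sketch:

    user_ids = ["@a:hs", "@b:hs", "@c:hs"]
    erased_users = {"@b:hs"}

    # Old style: build (key, value) tuples and feed them to dict().
    res_old = dict((u, u in erased_users) for u in user_ids)

    # New style: dict comprehension, same mapping.
    res_new = {u: u in erased_users for u in user_ids}

    assert res_old == res_new == {"@a:hs": False, "@b:hs": True, "@c:hs": False}
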
diff --git a/synapse/storage/data_stores/state/store.py b/synapse/storage/data_stores/state/store.py
index c4ee9b7ccb..57a5267663 100644
--- a/synapse/storage/data_stores/state/store.py
+++ b/synapse/storage/data_stores/state/store.py
@@ -520,11 +520,11 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
retcols=("state_group",),
)
- remaining_state_groups = set(
+ remaining_state_groups = {
row["state_group"]
for row in rows
if row["state_group"] not in state_groups_to_delete
- )
+ }
logger.info(
"[purge] de-delta-ing %i remaining state groups",
diff --git a/synapse/storage/database.py b/synapse/storage/database.py
index 6dcb5c04da..1953614401 100644
--- a/synapse/storage/database.py
+++ b/synapse/storage/database.py
@@ -554,8 +554,8 @@ class Database(object):
Returns:
A list of dicts where the key is the column header.
"""
- col_headers = list(intern(str(column[0])) for column in cursor.description)
- results = list(dict(zip(col_headers, row)) for row in cursor)
+ col_headers = [intern(str(column[0])) for column in cursor.description]
+ results = [dict(zip(col_headers, row)) for row in cursor]
return results
def execute(self, desc, decoder, query, *args):
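
cursor_to_dict keeps its behaviour here: it interns the column names once and zips each row against them; only the redundant list(...) wrappers are dropped. A rough sketch of what that transformation does, assuming a DB-API-style cursor faked with plain data (names and rows are illustrative):

    from sys import intern

    # DB-API shape: .description yields per-column 7-tuples (name first),
    # and iterating the cursor yields row tuples.
    description = [("user_id", None, None, None, None, None, None),
                   ("device_id", None, None, None, None, None, None)]
    rows = [("@a:hs", "DEV1"), ("@b:hs", "DEV2")]

    col_headers = [intern(str(column[0])) for column in description]
    results = [dict(zip(col_headers, row)) for row in rows]

    print(results)
    # [{'user_id': '@a:hs', 'device_id': 'DEV1'}, {'user_id': '@b:hs', 'device_id': 'DEV2'}]
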
diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py
index b950550f23..0f9ac1cf09 100644
--- a/synapse/storage/persist_events.py
+++ b/synapse/storage/persist_events.py
@@ -602,14 +602,14 @@ class EventsPersistenceStorage(object):
event_id_to_state_group.update(event_to_groups)
# State groups of old_latest_event_ids
- old_state_groups = set(
+ old_state_groups = {
event_id_to_state_group[evid] for evid in old_latest_event_ids
- )
+ }
# State groups of new_latest_event_ids
- new_state_groups = set(
+ new_state_groups = {
event_id_to_state_group[evid] for evid in new_latest_event_ids
- )
+ }
# If they old and new groups are the same then we don't need to do
# anything.
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c285ef52a0..fc69c32a0a 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -345,9 +345,9 @@ def _upgrade_existing_database(
"Could not open delta dir for version %d: %s" % (v, directory)
)
- duplicates = set(
+ duplicates = {
file_name for file_name, count in file_name_counter.items() if count > 1
- )
+ }
if duplicates:
# We don't support using the same file name in the same delta version.
raise PrepareDatabaseException(
@@ -454,7 +454,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams)
),
(modname,),
)
- applied_deltas = set(d for d, in cur)
+ applied_deltas = {d for d, in cur}
for (name, stream) in names_and_streams:
if name in applied_deltas:
continue
|